From 9aec2758cc29d27c31dcb0b4bb040484a885ef23 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 2 May 2023 15:01:45 -0700 Subject: [PATCH 001/205] stage2: start the InternPool transition Instead of doing everything at once which is a hopelessly large task, this introduces a piecemeal transition that can be done in small increments at a time. This is a minimal changeset that keeps the compiler compiling. It only uses the InternPool for a small set of types. Behavior tests are not passing. Air.Inst.Ref and Zir.Inst.Ref are separated into different enums but compile-time verified to have the same fields in the same order. The large set of changes is mainly to deal with the fact that most Type and Value methods now require a Module to be passed in, so that the InternPool object can be accessed. --- src/Air.zig | 125 +- src/AstGen.zig | 6 +- src/Autodoc.zig | 2 - src/Compilation.zig | 3 +- src/InternPool.zig | 518 +++++++- src/Liveness.zig | 8 +- src/Module.zig | 335 ++++- src/RangeSet.zig | 13 +- src/Sema.zig | 2383 ++++++++++++++++++---------------- src/TypedValue.zig | 42 +- src/Zir.zig | 518 ++------ src/arch/aarch64/CodeGen.zig | 351 ++--- src/arch/aarch64/abi.zig | 36 +- src/arch/arm/CodeGen.zig | 342 ++--- src/arch/arm/abi.zig | 41 +- src/arch/riscv64/CodeGen.zig | 72 +- src/arch/riscv64/abi.zig | 18 +- src/arch/sparc64/CodeGen.zig | 239 ++-- src/arch/wasm/CodeGen.zig | 966 +++++++------- src/arch/wasm/abi.zig | 41 +- src/arch/x86_64/CodeGen.zig | 774 +++++------ src/arch/x86_64/abi.zig | 82 +- src/codegen.zig | 204 ++- src/codegen/c.zig | 727 ++++++----- src/codegen/c/type.zig | 150 ++- src/codegen/llvm.zig | 1557 +++++++++++----------- src/codegen/spirv.zig | 208 +-- src/link/Coff.zig | 7 +- src/link/Dwarf.zig | 103 +- src/link/Elf.zig | 9 +- src/link/MachO.zig | 11 +- src/link/Plan9.zig | 9 +- src/link/Wasm.zig | 16 +- src/print_air.zig | 8 +- src/print_zir.zig | 12 +- src/target.zig | 128 -- src/type.zig | 1313 ++++++++++--------- src/value.zig | 880 ++++++------- 38 files changed, 6473 insertions(+), 5784 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 7ee36206f131..b60e8eda9ddb 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -5,10 +5,12 @@ const std = @import("std"); const builtin = @import("builtin"); -const Value = @import("value.zig").Value; -const Type = @import("type.zig").Type; const assert = std.debug.assert; + const Air = @This(); +const Value = @import("value.zig").Value; +const Type = @import("type.zig").Type; +const InternPool = @import("InternPool.zig"); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. @@ -837,7 +839,88 @@ pub const Inst = struct { /// The position of an AIR instruction within the `Air` instructions array. 
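 /// Note that this is distinct from `Ref`: a `Ref` value below `ref_start_index`
 /// names an interned constant, while `ref_start_index + Index` encodes an actual
 /// instruction (see `indexToRef`/`refToIndex` below).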
pub const Index = u32; - pub const Ref = @import("Zir.zig").Inst.Ref; + pub const Ref = enum(u32) { + u1_type = @enumToInt(InternPool.Index.u1_type), + u8_type = @enumToInt(InternPool.Index.u8_type), + i8_type = @enumToInt(InternPool.Index.i8_type), + u16_type = @enumToInt(InternPool.Index.u16_type), + i16_type = @enumToInt(InternPool.Index.i16_type), + u29_type = @enumToInt(InternPool.Index.u29_type), + u32_type = @enumToInt(InternPool.Index.u32_type), + i32_type = @enumToInt(InternPool.Index.i32_type), + u64_type = @enumToInt(InternPool.Index.u64_type), + i64_type = @enumToInt(InternPool.Index.i64_type), + u80_type = @enumToInt(InternPool.Index.u80_type), + u128_type = @enumToInt(InternPool.Index.u128_type), + i128_type = @enumToInt(InternPool.Index.i128_type), + usize_type = @enumToInt(InternPool.Index.usize_type), + isize_type = @enumToInt(InternPool.Index.isize_type), + c_char_type = @enumToInt(InternPool.Index.c_char_type), + c_short_type = @enumToInt(InternPool.Index.c_short_type), + c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type), + c_int_type = @enumToInt(InternPool.Index.c_int_type), + c_uint_type = @enumToInt(InternPool.Index.c_uint_type), + c_long_type = @enumToInt(InternPool.Index.c_long_type), + c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type), + c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type), + c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type), + c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type), + f16_type = @enumToInt(InternPool.Index.f16_type), + f32_type = @enumToInt(InternPool.Index.f32_type), + f64_type = @enumToInt(InternPool.Index.f64_type), + f80_type = @enumToInt(InternPool.Index.f80_type), + f128_type = @enumToInt(InternPool.Index.f128_type), + anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type), + bool_type = @enumToInt(InternPool.Index.bool_type), + void_type = @enumToInt(InternPool.Index.void_type), + type_type = @enumToInt(InternPool.Index.type_type), + anyerror_type = @enumToInt(InternPool.Index.anyerror_type), + comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type), + comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type), + noreturn_type = @enumToInt(InternPool.Index.noreturn_type), + anyframe_type = @enumToInt(InternPool.Index.anyframe_type), + null_type = @enumToInt(InternPool.Index.null_type), + undefined_type = @enumToInt(InternPool.Index.undefined_type), + enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type), + atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type), + atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type), + calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type), + address_space_type = @enumToInt(InternPool.Index.address_space_type), + float_mode_type = @enumToInt(InternPool.Index.float_mode_type), + reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type), + call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type), + prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type), + export_options_type = @enumToInt(InternPool.Index.export_options_type), + extern_options_type = @enumToInt(InternPool.Index.extern_options_type), + type_info_type = @enumToInt(InternPool.Index.type_info_type), + manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), + manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + single_const_pointer_to_comptime_int_type = 
@enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), + const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), + anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), + generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), + empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), + undef = @enumToInt(InternPool.Index.undef), + zero = @enumToInt(InternPool.Index.zero), + zero_usize = @enumToInt(InternPool.Index.zero_usize), + one = @enumToInt(InternPool.Index.one), + one_usize = @enumToInt(InternPool.Index.one_usize), + calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), + calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), + void_value = @enumToInt(InternPool.Index.void_value), + unreachable_value = @enumToInt(InternPool.Index.unreachable_value), + null_value = @enumToInt(InternPool.Index.null_value), + bool_true = @enumToInt(InternPool.Index.bool_true), + bool_false = @enumToInt(InternPool.Index.bool_false), + empty_struct = @enumToInt(InternPool.Index.empty_struct), + generic_poison = @enumToInt(InternPool.Index.generic_poison), + + /// This Ref does not correspond to any AIR instruction or constant + /// value and may instead be used as a sentinel to indicate null. + none = std.math.maxInt(u32), + _, + }; /// All instructions have an 8-byte payload, which is contained within /// this union. `Tag` determines which union field is active, as well as @@ -1066,10 +1149,13 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].ty; + if (ref_int < InternPool.static_keys.len) { + return .{ + .ip_index = InternPool.static_keys[ref_int].typeOf(), + .legacy = undefined, + }; } - return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len)); + return air.typeOfIndex(ref_int - ref_start_index); } pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { @@ -1286,11 +1372,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand); - switch (callee_ty.zigTypeTag()) { - .Fn => return callee_ty.fnReturnType(), - .Pointer => return callee_ty.childType().fnReturnType(), - else => unreachable, - } + return callee_ty.fnReturnType(); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { @@ -1328,11 +1410,11 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const ref_int = @enumToInt(ref); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - var buffer: Value.ToTypeBuffer = undefined; - return Air.Inst.Ref.typed_value_map[ref_int].val.toType(&buffer); + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toType(); } - const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len; + const inst_index = ref_int - ref_start_index; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); assert(air_tags[inst_index] == .const_ty); @@ -1367,7 +1449,7 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { air.* = undefined; } -const ref_start_index: u32 = 
Air.Inst.Ref.typed_value_map.len; +pub const ref_start_index: u32 = InternPool.static_len; pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { return @intToEnum(Air.Inst.Ref, ref_start_index + inst); @@ -1383,17 +1465,18 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } /// Returns `null` if runtime-known. -pub fn value(air: Air, inst: Air.Inst.Ref) ?Value { +pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?Value { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].val; + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toValue(); } - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index); const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, - else => return air.typeOfIndex(inst_index).onePossibleValue(), + else => return air.typeOfIndex(inst_index).onePossibleValue(mod), } } diff --git a/src/AstGen.zig b/src/AstGen.zig index b38067fd03db..6461b11d8075 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -8530,7 +8530,7 @@ fn builtinCall( return rvalue(gz, ri, result, node); }, .call => { - const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .modifier_type } }, params[0]); + const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]); const callee = try expr(gz, scope, .{ .rl = .none }, params[1]); const args = try expr(gz, scope, .{ .rl = .none }, params[2]); const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ @@ -10298,10 +10298,6 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type), as_ty | @enumToInt(Zir.Inst.Ref.null_type), as_ty | @enumToInt(Zir.Inst.Ref.undefined_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_void_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_naked_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_ccc_void_no_args_type), as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 879f0a6b1540..c20c5771dd42 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -95,8 +95,6 @@ pub fn generateZirData(self: *Autodoc) !void { } } - log.debug("Ref map size: {}", .{Ref.typed_value_map.len}); - const root_src_dir = self.comp_module.main_pkg.root_src_directory; const root_src_path = self.comp_module.main_pkg.root_src_path; const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path}); diff --git a/src/Compilation.zig b/src/Compilation.zig index cbdc789d40c9..15e393c35ce4 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1317,7 +1317,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .emit_h = emit_h, .error_name_list = .{}, }; - try module.error_name_list.append(gpa, "(no error)"); + try module.init(); break :blk module; } else blk: { @@ -2064,6 +2064,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void if (!build_options.only_c and !build_options.only_core_functionality) { if (comp.emit_docs) |doc_location| { if 
(comp.bin_file.options.module) |module| { + if (true) @panic("TODO: get autodoc working again in this branch"); var autodoc = Autodoc.init(module, doc_location); defer autodoc.deinit(); try autodoc.generateZirData(); diff --git a/src/InternPool.zig b/src/InternPool.zig index 74155ca657c1..b835315e5a1f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1,11 +1,16 @@ +//! All interned objects have both a value and a type. + map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, -const InternPool = @This(); const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const BigIntConst = std.math.big.int.Const; + +const InternPool = @This(); +const DeclIndex = enum(u32) { _ }; const KeyAdapter = struct { intern_pool: *const InternPool, @@ -17,24 +22,21 @@ const KeyAdapter = struct { pub fn hash(ctx: @This(), a: Key) u32 { _ = ctx; - return a.hash(); + return a.hash32(); } }; pub const Key = union(enum) { - int_type: struct { - signedness: std.builtin.Signedness, - bits: u16, - }, + int_type: IntType, ptr_type: struct { elem_type: Index, - sentinel: Index, - alignment: u16, + sentinel: Index = .none, + alignment: u16 = 0, size: std.builtin.Type.Pointer.Size, - is_const: bool, - is_volatile: bool, - is_allowzero: bool, - address_space: std.builtin.AddressSpace, + is_const: bool = false, + is_volatile: bool = false, + is_allowzero: bool = false, + address_space: std.builtin.AddressSpace = .generic, }, array_type: struct { len: u64, @@ -52,20 +54,52 @@ pub const Key = union(enum) { error_set_type: Index, payload_type: Index, }, - simple: Simple, + simple_type: SimpleType, + simple_value: SimpleValue, + extern_func: struct { + ty: Index, + /// The Decl that corresponds to the function itself. + owner_decl: DeclIndex, + /// Library name if specified. + /// For example `extern "c" fn write(...) usize` would have 'c' as library name. + /// Index into the string table bytes. 
+ lib_name: u32, + }, + int: struct { + ty: Index, + big_int: BigIntConst, + }, + enum_tag: struct { + ty: Index, + tag: BigIntConst, + }, + struct_type: struct { + fields_len: u32, + // TODO move Module.Struct data to here + }, + + pub const IntType = std.builtin.Type.Int; - pub fn hash(key: Key) u32 { + pub fn hash32(key: Key) u32 { + return @truncate(u32, key.hash64()); + } + + pub fn hash64(key: Key) u64 { var hasher = std.hash.Wyhash.init(0); + key.hashWithHasher(&hasher); + return hasher.final(); + } + + pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void { switch (key) { .int_type => |int_type| { - std.hash.autoHash(&hasher, int_type); + std.hash.autoHash(hasher, int_type); }, .array_type => |array_type| { - std.hash.autoHash(&hasher, array_type); + std.hash.autoHash(hasher, array_type); }, else => @panic("TODO"), } - return @truncate(u32, hasher.final()); } pub fn eql(a: Key, b: Key) bool { @@ -85,6 +119,34 @@ pub const Key = union(enum) { else => @panic("TODO"), } } + + pub fn typeOf(key: Key) Index { + switch (key) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .optional_type, + .error_union_type, + .simple_type, + .struct_type, + => return .type_type, + + .int => |x| return x.ty, + .extern_func => |x| return x.ty, + .enum_tag => |x| return x.ty, + + .simple_value => |s| switch (s) { + .undefined => return .undefined_type, + .void => return .void_type, + .null => return .null_type, + .false, .true => return .bool_type, + .empty_struct => return .empty_struct_type, + .@"unreachable" => return .noreturn_type, + .generic_poison => unreachable, + }, + } + } }; pub const Item = struct { @@ -98,11 +160,330 @@ pub const Item = struct { /// Two values which have the same type can be equality compared simply /// by checking if their indexes are equal, provided they are both in /// the same `InternPool`. +/// When adding a tag to this enum, consider adding a corresponding entry to +/// `primitives` in AstGen.zig. 
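+/// The first `static_len` items are pre-interned by `init` in the order given by
+/// `static_keys`; a comptime block in Module.zig verifies that these fields line
+/// up, name for name, with `Zir.Inst.Ref` and `Air.Inst.Ref`.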
pub const Index = enum(u32) { + u1_type, + u8_type, + i8_type, + u16_type, + i16_type, + u29_type, + u32_type, + i32_type, + u64_type, + i64_type, + u80_type, + u128_type, + i128_type, + usize_type, + isize_type, + c_char_type, + c_short_type, + c_ushort_type, + c_int_type, + c_uint_type, + c_long_type, + c_ulong_type, + c_longlong_type, + c_ulonglong_type, + c_longdouble_type, + f16_type, + f32_type, + f64_type, + f80_type, + f128_type, + anyopaque_type, + bool_type, + void_type, + type_type, + anyerror_type, + comptime_int_type, + comptime_float_type, + noreturn_type, + anyframe_type, + null_type, + undefined_type, + enum_literal_type, + atomic_order_type, + atomic_rmw_op_type, + calling_convention_type, + address_space_type, + float_mode_type, + reduce_op_type, + call_modifier_type, + prefetch_options_type, + export_options_type, + extern_options_type, + type_info_type, + manyptr_u8_type, + manyptr_const_u8_type, + single_const_pointer_to_comptime_int_type, + const_slice_u8_type, + anyerror_void_error_union_type, + generic_poison_type, + var_args_param_type, + empty_struct_type, + + /// `undefined` (untyped) + undef, + /// `0` (comptime_int) + zero, + /// `0` (usize) + zero_usize, + /// `1` (comptime_int) + one, + /// `1` (usize) + one_usize, + /// `std.builtin.CallingConvention.C` + calling_convention_c, + /// `std.builtin.CallingConvention.Inline` + calling_convention_inline, + /// `{}` + void_value, + /// `unreachable` (noreturn type) + unreachable_value, + /// `null` (untyped) + null_value, + /// `true` + bool_true, + /// `false` + bool_false, + /// `.{}` (untyped) + empty_struct, + /// Used for generic parameters where the type and value + /// is not known until generic function instantiation. + generic_poison, + none = std.math.maxInt(u32), + _, + + pub fn toType(i: Index) @import("type.zig").Type { + assert(i != .none); + return .{ + .ip_index = i, + .legacy = undefined, + }; + } + + pub fn toValue(i: Index) @import("value.zig").Value { + assert(i != .none); + return .{ + .ip_index = i, + .legacy = undefined, + }; + } +}; + +pub const static_keys = [_]Key{ + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 1, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 29, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 80, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 128, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 128, + } }, + + .{ .simple_type = .usize }, + .{ .simple_type = .isize }, + .{ .simple_type = .c_char }, + .{ .simple_type = .c_short }, + .{ .simple_type = .c_ushort }, + .{ .simple_type = .c_int }, + .{ .simple_type = .c_uint }, + .{ .simple_type = .c_long }, + .{ .simple_type = .c_ulong }, + .{ .simple_type = .c_longlong }, + .{ .simple_type = .c_ulonglong }, + .{ .simple_type = .c_longdouble }, + .{ .simple_type = .f16 }, + .{ .simple_type = .f32 }, + .{ .simple_type = .f64 }, + .{ .simple_type = .f80 }, + .{ .simple_type = .f128 }, + .{ .simple_type = 
.anyopaque }, + .{ .simple_type = .bool }, + .{ .simple_type = .void }, + .{ .simple_type = .type }, + .{ .simple_type = .anyerror }, + .{ .simple_type = .comptime_int }, + .{ .simple_type = .comptime_float }, + .{ .simple_type = .noreturn }, + .{ .simple_type = .@"anyframe" }, + .{ .simple_type = .null }, + .{ .simple_type = .undefined }, + .{ .simple_type = .enum_literal }, + .{ .simple_type = .atomic_order }, + .{ .simple_type = .atomic_rmw_op }, + .{ .simple_type = .calling_convention }, + .{ .simple_type = .address_space }, + .{ .simple_type = .float_mode }, + .{ .simple_type = .reduce_op }, + .{ .simple_type = .call_modifier }, + .{ .simple_type = .prefetch_options }, + .{ .simple_type = .export_options }, + .{ .simple_type = .extern_options }, + .{ .simple_type = .type_info }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Many, + } }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Many, + .is_const = true, + } }, + + .{ .ptr_type = .{ + .elem_type = .comptime_int_type, + .size = .One, + .is_const = true, + } }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Slice, + .is_const = true, + } }, + + .{ .error_union_type = .{ + .error_set_type = .anyerror_type, + .payload_type = .void_type, + } }, + + // generic_poison_type + .{ .simple_type = .generic_poison }, + + // var_args_param_type + .{ .simple_type = .var_args_param }, + + // empty_struct_type + .{ .struct_type = .{ + .fields_len = 0, + } }, + + .{ .simple_value = .undefined }, + + .{ .int = .{ + .ty = .comptime_int_type, + .big_int = .{ + .limbs = &.{0}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .usize_type, + .big_int = .{ + .limbs = &.{0}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .comptime_int_type, + .big_int = .{ + .limbs = &.{1}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .usize_type, + .big_int = .{ + .limbs = &.{1}, + .positive = true, + }, + } }, + + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .tag = .{ + .limbs = &.{@enumToInt(std.builtin.CallingConvention.C)}, + .positive = true, + }, + } }, + + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .tag = .{ + .limbs = &.{@enumToInt(std.builtin.CallingConvention.Inline)}, + .positive = true, + }, + } }, + + .{ .simple_value = .void }, + .{ .simple_value = .@"unreachable" }, + .{ .simple_value = .null }, + .{ .simple_value = .true }, + .{ .simple_value = .false }, + .{ .simple_value = .empty_struct }, + .{ .simple_value = .generic_poison }, }; +/// How many items in the InternPool are statically known. +pub const static_len: u32 = static_keys.len; + pub const Tag = enum(u8) { /// An integer type. /// data is number of bits @@ -113,9 +494,12 @@ pub const Tag = enum(u8) { /// An array type. /// data is payload to Array. type_array, - /// A type or value that can be represented with only an enum tag. - /// data is Simple enum value - simple, + /// A type that can be represented with only an enum tag. + /// data is SimpleType enum value. + simple_type, + /// A value that can be represented with only an enum tag. + /// data is SimpleValue enum value. + simple_value, /// An unsigned integer value that can be represented by u32. /// data is integer value int_u32, @@ -137,9 +521,20 @@ pub const Tag = enum(u8) { /// A float value that can be represented by f128. /// data is payload index to Float128. float_f128, + /// An extern function. + extern_func, + /// A regular function. 
+ func, + /// Represents the data that an enum declaration provides, when the fields + /// are auto-numbered, and there are no declarations. + /// data is payload index to `EnumSimple`. + enum_simple, }; -pub const Simple = enum(u32) { +/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to +/// implement logic that only wants to deal with types because the logic can +/// ignore all simple values. Note that technically, types are values. +pub const SimpleType = enum(u32) { f16, f32, f64, @@ -147,6 +542,7 @@ pub const Simple = enum(u32) { f128, usize, isize, + c_char, c_short, c_ushort, c_int, @@ -165,14 +561,36 @@ pub const Simple = enum(u32) { comptime_float, noreturn, @"anyframe", - null_type, - undefined_type, - enum_literal_type, + null, undefined, - void_value, + enum_literal, + + atomic_order, + atomic_rmw_op, + calling_convention, + address_space, + float_mode, + reduce_op, + call_modifier, + prefetch_options, + export_options, + extern_options, + type_info, + + generic_poison, + var_args_param, +}; + +pub const SimpleValue = enum(u32) { + undefined, + void, null, - bool_true, - bool_false, + empty_struct, + true, + false, + @"unreachable", + + generic_poison, }; pub const Array = struct { @@ -180,10 +598,44 @@ pub const Array = struct { child: Index, }; +/// Trailing: +/// 0. field name: null-terminated string index for each fields_len; declaration order +pub const EnumSimple = struct { + /// The Decl that corresponds to the enum itself. + owner_decl: DeclIndex, + /// An integer type which is used for the numerical value of the enum. This + /// is inferred by Zig to be the smallest power of two unsigned int that + /// fits the number of fields. It is stored here to avoid unnecessary + /// calculations and possibly allocation failure when querying the tag type + /// of enums. + int_tag_ty: Index, + fields_len: u32, +}; + +pub fn init(ip: *InternPool, gpa: Allocator) !void { + assert(ip.items.len == 0); + + // So that we can use `catch unreachable` below. + try ip.items.ensureUnusedCapacity(gpa, static_keys.len); + try ip.map.ensureUnusedCapacity(gpa, static_keys.len); + try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); + + // This inserts all the statically-known values into the intern pool in the + // order expected. + for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable; + + // Sanity check. 
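+    // Round-tripping a couple of the static values through indexToKey confirms
+    // that each one was interned at its reserved Index.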
+ assert(ip.indexToKey(.bool_true).simple_value == .true); + assert(ip.indexToKey(.bool_false).simple_value == .false); + + assert(ip.items.len == static_keys.len); +} + pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.map.deinit(gpa); ip.items.deinit(gpa); ip.extra.deinit(gpa); + ip.* = undefined; } pub fn indexToKey(ip: InternPool, index: Index) Key { @@ -210,7 +662,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .sentinel = .none, } }; }, - .simple => .{ .simple = @intToEnum(Simple, data) }, + .simple_type => .{ .simple_type = @intToEnum(SimpleType, data) }, + .simple_value => .{ .simple_value = @intToEnum(SimpleValue, data) }, else => @panic("TODO"), }; @@ -224,12 +677,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } switch (key) { .int_type => |int_type| { - const tag: Tag = switch (int_type.signedness) { + const t: Tag = switch (int_type.signedness) { .signed => .type_int_signed, .unsigned => .type_int_unsigned, }; try ip.items.append(gpa, .{ - .tag = tag, + .tag = t, .data = int_type.bits, }); }, @@ -249,6 +702,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } +pub fn tag(ip: InternPool, index: Index) Tag { + const tags = ip.items.items(.tag); + return tags[@enumToInt(index)]; +} + fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); try ip.extra.ensureUnusedCapacity(gpa, fields.len); diff --git a/src/Liveness.zig b/src/Liveness.zig index 59135ef5c87a..45d0705008f9 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -5,15 +5,17 @@ //! Some instructions are special, such as: //! * Conditional Branches //! * Switch Branches -const Liveness = @This(); const std = @import("std"); -const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; -const Air = @import("Air.zig"); const Log2Int = std.math.Log2Int; +const Liveness = @This(); +const trace = @import("tracy.zig").trace; +const Air = @import("Air.zig"); +const InternPool = @import("InternPool.zig"); + pub const Verify = @import("Liveness/Verify.zig"); /// This array is split into sets of 4 bits per AIR instruction. diff --git a/src/Module.zig b/src/Module.zig index a8f2281c4f64..5756955d3c1c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -32,6 +32,19 @@ const build_options = @import("build_options"); const Liveness = @import("Liveness.zig"); const isUpDir = @import("introspect.zig").isUpDir; const clang = @import("clang.zig"); +const InternPool = @import("InternPool.zig"); + +comptime { + @setEvalBranchQuota(4000); + for ( + @typeInfo(Zir.Inst.Ref).Enum.fields, + @typeInfo(Air.Inst.Ref).Enum.fields, + @typeInfo(InternPool.Index).Enum.fields, + ) |zir_field, air_field, ip_field| { + assert(mem.eql(u8, zir_field.name, ip_field.name)); + assert(mem.eql(u8, air_field.name, ip_field.name)); + } +} /// General-purpose allocator. Used for both temporary and long-term storage. gpa: Allocator, @@ -83,6 +96,9 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{}, string_literal_bytes: ArrayListUnmanaged(u8) = .{}, +/// Stores all Type and Value objects; periodically garbage collected. 
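+/// See the `intern` helper for a shortcut that supplies `mod.gpa`.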
+intern_pool: InternPool = .{}, + /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. @@ -807,9 +823,9 @@ pub const Decl = struct { return (try decl.typedValue()).val; } - pub fn isFunction(decl: Decl) !bool { + pub fn isFunction(decl: Decl, mod: *const Module) !bool { const tv = try decl.typedValue(); - return tv.ty.zigTypeTag() == .Fn; + return tv.ty.zigTypeTag(mod) == .Fn; } /// If the Decl has a value and it is a struct, return it, @@ -921,14 +937,14 @@ pub const Decl = struct { }; } - pub fn getAlignment(decl: Decl, target: Target) u32 { + pub fn getAlignment(decl: Decl, mod: *const Module) u32 { assert(decl.has_tv); if (decl.@"align" != 0) { // Explicit alignment. return decl.@"align"; } else { // Natural alignment. - return decl.ty.abiAlignment(target); + return decl.ty.abiAlignment(mod); } } }; @@ -1030,7 +1046,7 @@ pub const Struct = struct { /// Returns the field alignment. If the struct is packed, returns 0. pub fn alignment( field: Field, - target: Target, + mod: *const Module, layout: std.builtin.Type.ContainerLayout, ) u32 { if (field.abi_align != 0) { @@ -1038,24 +1054,26 @@ pub const Struct = struct { return field.abi_align; } + const target = mod.getTarget(); + switch (layout) { .Packed => return 0, .Auto => { if (target.ofmt == .c) { - return alignmentExtern(field, target); + return alignmentExtern(field, mod); } else { - return field.ty.abiAlignment(target); + return field.ty.abiAlignment(mod); } }, - .Extern => return alignmentExtern(field, target), + .Extern => return alignmentExtern(field, mod), } } - pub fn alignmentExtern(field: Field, target: Target) u32 { + pub fn alignmentExtern(field: Field, mod: *const Module) u32 { // This logic is duplicated in Type.abiAlignmentAdvanced. - const ty_abi_align = field.ty.abiAlignment(target); + const ty_abi_align = field.ty.abiAlignment(mod); - if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { // The C ABI requires 128 bit integer fields of structs // to be 16-bytes aligned. 
return @max(ty_abi_align, 16); @@ -1132,7 +1150,7 @@ pub const Struct = struct { }; } - pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 { + pub fn packedFieldBitOffset(s: Struct, mod: *const Module, index: usize) u16 { assert(s.layout == .Packed); assert(s.haveLayout()); var bit_sum: u64 = 0; @@ -1140,12 +1158,13 @@ pub const Struct = struct { if (i == index) { return @intCast(u16, bit_sum); } - bit_sum += field.ty.bitSize(target); + bit_sum += field.ty.bitSize(mod); } unreachable; // index out of bounds } pub const RuntimeFieldIterator = struct { + module: *const Module, struct_obj: *const Struct, index: u32 = 0, @@ -1155,6 +1174,7 @@ pub const Struct = struct { }; pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex { + const mod = it.module; while (true) { var i = it.index; it.index += 1; @@ -1167,15 +1187,18 @@ pub const Struct = struct { } const field = it.struct_obj.fields.values()[i]; - if (!field.is_comptime and field.ty.hasRuntimeBits()) { + if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) { return FieldAndIndex{ .index = i, .field = field }; } } } }; - pub fn runtimeFieldIterator(s: *const Struct) RuntimeFieldIterator { - return .{ .struct_obj = s }; + pub fn runtimeFieldIterator(s: *const Struct, module: *const Module) RuntimeFieldIterator { + return .{ + .struct_obj = s, + .module = module, + }; } }; @@ -1323,9 +1346,9 @@ pub const Union = struct { /// Returns the field alignment, assuming the union is not packed. /// Keep implementation in sync with `Sema.unionFieldAlignment`. /// Prefer to call that function instead of this one during Sema. - pub fn normalAlignment(field: Field, target: Target) u32 { + pub fn normalAlignment(field: Field, mod: *const Module) u32 { if (field.abi_align == 0) { - return field.ty.abiAlignment(target); + return field.ty.abiAlignment(mod); } else { return field.abi_align; } @@ -1383,22 +1406,22 @@ pub const Union = struct { }; } - pub fn hasAllZeroBitFieldTypes(u: Union) bool { + pub fn hasAllZeroBitFieldTypes(u: Union, mod: *const Module) bool { assert(u.haveFieldTypes()); for (u.fields.values()) |field| { - if (field.ty.hasRuntimeBits()) return false; + if (field.ty.hasRuntimeBits(mod)) return false; } return true; } - pub fn mostAlignedField(u: Union, target: Target) u32 { + pub fn mostAlignedField(u: Union, mod: *const Module) u32 { assert(u.haveFieldTypes()); var most_alignment: u32 = 0; var most_index: usize = undefined; for (u.fields.values(), 0..) |field, i| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; - const field_align = field.normalAlignment(target); + const field_align = field.normalAlignment(mod); if (field_align > most_alignment) { most_alignment = field_align; most_index = i; @@ -1408,20 +1431,20 @@ pub const Union = struct { } /// Returns 0 if the union is represented with 0 bits at runtime. 
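 /// When `have_tag` is true, the tag type's alignment also participates in the maximum.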
- pub fn abiAlignment(u: Union, target: Target, have_tag: bool) u32 { + pub fn abiAlignment(u: Union, mod: *const Module, have_tag: bool) u32 { var max_align: u32 = 0; - if (have_tag) max_align = u.tag_ty.abiAlignment(target); + if (have_tag) max_align = u.tag_ty.abiAlignment(mod); for (u.fields.values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; - const field_align = field.normalAlignment(target); + const field_align = field.normalAlignment(mod); max_align = @max(max_align, field_align); } return max_align; } - pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 { - return u.getLayout(target, have_tag).abi_size; + pub fn abiSize(u: Union, mod: *const Module, have_tag: bool) u64 { + return u.getLayout(mod, have_tag).abi_size; } pub const Layout = struct { @@ -1451,7 +1474,7 @@ pub const Union = struct { }; } - pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout { + pub fn getLayout(u: Union, mod: *const Module, have_tag: bool) Layout { assert(u.haveLayout()); var most_aligned_field: u32 = undefined; var most_aligned_field_size: u64 = undefined; @@ -1460,16 +1483,16 @@ pub const Union = struct { var payload_align: u32 = 0; const fields = u.fields.values(); for (fields, 0..) |field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const field_align = a: { if (field.abi_align == 0) { - break :a field.ty.abiAlignment(target); + break :a field.ty.abiAlignment(mod); } else { break :a field.abi_align; } }; - const field_size = field.ty.abiSize(target); + const field_size = field.ty.abiSize(mod); if (field_size > payload_size) { payload_size = field_size; biggest_field = @intCast(u32, i); @@ -1481,7 +1504,7 @@ pub const Union = struct { } } payload_align = @max(payload_align, 1); - if (!have_tag or !u.tag_ty.hasRuntimeBits()) { + if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { return .{ .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), .abi_align = payload_align, @@ -1497,8 +1520,8 @@ pub const Union = struct { } // Put the tag before or after the payload depending on which one's // alignment is greater. - const tag_size = u.tag_ty.abiSize(target); - const tag_align = @max(1, u.tag_ty.abiAlignment(target)); + const tag_size = u.tag_ty.abiSize(mod); + const tag_align = @max(1, u.tag_ty.abiAlignment(mod)); var size: u64 = 0; var padding: u32 = undefined; if (tag_align >= payload_align) { @@ -2281,7 +2304,7 @@ pub const ErrorMsg = struct { ) !*ErrorMsg { const err_msg = try gpa.create(ErrorMsg); errdefer gpa.destroy(err_msg); - err_msg.* = try init(gpa, src_loc, format, args); + err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args); return err_msg; } @@ -3391,6 +3414,12 @@ pub const CompileError = error{ ComptimeBreak, }; +pub fn init(mod: *Module) !void { + const gpa = mod.gpa; + try mod.error_name_list.append(gpa, "(no error)"); + try mod.intern_pool.init(gpa); +} + pub fn deinit(mod: *Module) void { const gpa = mod.gpa; @@ -3518,6 +3547,8 @@ pub fn deinit(mod: *Module) void { mod.string_literal_table.deinit(gpa); mod.string_literal_bytes.deinit(gpa); + + mod.intern_pool.deinit(gpa); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { @@ -4277,7 +4308,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // Update all dependents which have at least this level of dependency. 
// If our type remained the same and we're a function, only update // decls which depend on our body; otherwise, update all dependents. - const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag() == .Fn) .function_body else .normal; + const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag(mod) == .Fn) .function_body else .normal; for (decl.dependants.keys(), decl.dependants.values()) |dep_index, dep_type| { if (@enumToInt(dep_type) < @enumToInt(update_level)) continue; @@ -4748,8 +4779,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl_tv.ty.fmt(mod), }); } - var buffer: Value.ToTypeBuffer = undefined; - const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator); + const ty = try decl_tv.val.toType().copy(decl_arena_allocator); if (ty.getNamespace() == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } @@ -4775,7 +4805,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var type_changed = true; if (decl.has_tv) { - prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(); + prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); if (decl.getFunction()) |prev_func| { prev_is_inline = prev_func.state == .inline_only; @@ -5510,7 +5540,7 @@ pub fn clearDecl( try mod.deleteDeclExports(decl_index); if (decl.has_tv) { - if (decl.ty.isFnOrHasRuntimeBits()) { + if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } if (decl.getInnerNamespace()) |namespace| { @@ -5699,7 +5729,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const arg_val = if (arg_tv.val.tag() != .generic_poison) arg_tv.val - else if (arg_tv.ty.onePossibleValue()) |opv| + else if (arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -5773,7 +5803,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // If we don't get an error return trace from a caller, create our own. if (func.calls_or_awaits_errorable_fn and mod.comp.bin_file.options.error_return_tracing and - !sema.fn_ret_ty.isError()) + !sema.fn_ret_ty.isError(mod)) { sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) { // TODO make these unreachable instead of @panic @@ -5995,25 +6025,11 @@ pub fn initNewAnonDecl( // if the Decl is referenced by an instruction or another constant. Otherwise, // the Decl will be garbage collected by the `codegen_decl` task instead of sent // to the linker. - if (typed_value.ty.isFnOrHasRuntimeBits()) { + if (typed_value.ty.isFnOrHasRuntimeBits(mod)) { try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); } } -pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type { - const int_payload = try arena.create(Type.Payload.Bits); - int_payload.* = .{ - .base = .{ - .tag = switch (signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - }, - }, - .data = bits, - }; - return Type.initPayload(&int_payload.base); -} - pub fn errNoteNonLazy( mod: *Module, src_loc: SrcLoc, @@ -6779,3 +6795,204 @@ pub fn backendSupportsFeature(mod: Module, feature: Feature) bool { .field_reordering => mod.comp.bin_file.options.use_llvm, }; } + +/// Shortcut for calling `intern_pool.get`. 
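+/// Interning deduplicates: equal keys always yield the same `Index`, which is what
+/// makes comparing values by comparing indexes valid.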
+pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index { + return mod.intern_pool.get(mod.gpa, key); +} + +pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type { + const i = try intern(mod, .{ .int_type = .{ + .signedness = signedness, + .bits = bits, + } }); + return i.toType(); +} + +pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { + return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); +} + +/// Returns the smallest possible integer type containing both `min` and +/// `max`. Asserts that neither value is undef. +/// TODO: if #3806 is implemented, this becomes trivial +pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { + assert(!min.isUndef()); + assert(!max.isUndef()); + + if (std.debug.runtime_safety) { + assert(Value.order(min, max, mod).compare(.lte)); + } + + const sign = min.orderAgainstZero(mod) == .lt; + + const min_val_bits = intBitsForValue(mod, min, sign); + const max_val_bits = intBitsForValue(mod, max, sign); + + return mod.intType( + if (sign) .signed else .unsigned, + @max(min_val_bits, max_val_bits), + ); +} + +/// Given a value representing an integer, returns the number of bits necessary to represent +/// this value in an integer. If `sign` is true, returns the number of bits necessary in a +/// twos-complement integer; otherwise in an unsigned integer. +/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. +pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { + assert(!val.isUndef()); + switch (val.tag()) { + .int_big_positive => { + const limbs = val.castTag(.int_big_positive).?.data; + const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true }; + return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); + }, + .int_big_negative => { + const limbs = val.castTag(.int_big_negative).?.data; + // Zero is still a possibility, in which case unsigned is fine + for (limbs) |limb| { + if (limb != 0) break; + } else return 0; // val == 0 + assert(sign); + const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false }; + return @intCast(u16, big.bitCountTwosComp()); + }, + .int_i64 => { + const x = val.castTag(.int_i64).?.data; + if (x >= 0) return Type.smallestUnsignedBits(@intCast(u64, x)); + assert(sign); + return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; + }, + else => { + const x = val.toUnsignedInt(mod); + return Type.smallestUnsignedBits(x) + @boolToInt(sign); + }, + } +} + +pub const AtomicPtrAlignmentError = error{ + FloatTooBig, + IntTooBig, + BadType, +}; + +pub const AtomicPtrAlignmentDiagnostics = struct { + bits: u16 = undefined, + max_bits: u16 = undefined, +}; + +/// If ABI alignment of `ty` is OK for atomic operations, returns 0. +/// Otherwise returns the alignment required on a pointer for the target +/// to perform atomic operations. +// TODO this function does not take into account CPU features, which can affect +// this value. Audit this! 
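+// (The one case below that does is x86_64, which checks for the cx16 feature.)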
+pub fn atomicPtrAlignment( + mod: *const Module, + ty: Type, + diags: *AtomicPtrAlignmentDiagnostics, +) AtomicPtrAlignmentError!u32 { + const target = mod.getTarget(); + const max_atomic_bits: u16 = switch (target.cpu.arch) { + .avr, + .msp430, + .spu_2, + => 16, + + .arc, + .arm, + .armeb, + .hexagon, + .m68k, + .le32, + .mips, + .mipsel, + .nvptx, + .powerpc, + .powerpcle, + .r600, + .riscv32, + .sparc, + .sparcel, + .tce, + .tcele, + .thumb, + .thumbeb, + .x86, + .xcore, + .amdil, + .hsail, + .spir, + .kalimba, + .lanai, + .shave, + .wasm32, + .renderscript32, + .csky, + .spirv32, + .dxil, + .loongarch32, + .xtensa, + => 32, + + .amdgcn, + .bpfel, + .bpfeb, + .le64, + .mips64, + .mips64el, + .nvptx64, + .powerpc64, + .powerpc64le, + .riscv64, + .sparc64, + .s390x, + .amdil64, + .hsail64, + .spir64, + .wasm64, + .renderscript64, + .ve, + .spirv64, + .loongarch64, + => 64, + + .aarch64, + .aarch64_be, + .aarch64_32, + => 128, + + .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, + }; + + const int_ty = switch (ty.zigTypeTag(mod)) { + .Int => ty, + .Enum => ty.intTagType(), + .Float => { + const bit_count = ty.floatBits(target); + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.FloatTooBig; + } + return 0; + }, + .Bool => return 0, + else => { + if (ty.isPtrAtRuntime(mod)) return 0; + return error.BadType; + }, + }; + + const bit_count = int_ty.intInfo(mod).bits; + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.IntTooBig; + } + + return 0; +} diff --git a/src/RangeSet.zig b/src/RangeSet.zig index aa051ff424fb..2e28a562c6fc 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -60,13 +60,14 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { if (self.ranges.items.len == 0) return false; + const mod = self.module; std.mem.sort(Range, self.ranges.items, LessThanContext{ .ty = ty, - .module = self.module, + .module = mod, }, lessThan); - if (!self.ranges.items[0].first.eql(first, ty, self.module) or - !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module)) + if (!self.ranges.items[0].first.eql(first, ty, mod) or + !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, mod)) { return false; } @@ -76,18 +77,16 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { var counter = try std.math.big.int.Managed.init(self.ranges.allocator); defer counter.deinit(); - const target = self.module.getTarget(); - // look for gaps for (self.ranges.items[1..], 0..) |cur, i| { // i starts counting from the second item. 
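            // Hence `prev` (items[i]) is the range immediately preceding `cur`.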
const prev = self.ranges.items[i]; // prev.last + 1 == cur.first - try counter.copy(prev.last.toBigInt(&space, target)); + try counter.copy(prev.last.toBigInt(&space, mod)); try counter.addScalar(&counter, 1); - const cur_start_int = cur.first.toBigInt(&space, target); + const cur_start_int = cur.first.toBigInt(&space, mod); if (!cur_start_int.eq(counter.toConst())) { return false; } diff --git a/src/Sema.zig b/src/Sema.zig index 9e21bfa83d85..9b76fee68e59 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -114,6 +114,7 @@ const Package = @import("Package.zig"); const crash_report = @import("crash_report.zig"); const build_options = @import("build_options"); const Compilation = @import("Compilation.zig"); +const InternPool = @import("InternPool.zig"); pub const default_branch_quota = 1000; pub const default_reference_trace_len = 2; @@ -1614,6 +1615,7 @@ fn analyzeBodyInner( }, .@"try" => blk: { if (!block.is_comptime) break :blk try sema.zirTry(block, inst); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -1621,18 +1623,18 @@ fn analyzeBodyInner( const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1654,11 +1656,11 @@ fn analyzeBodyInner( const err_union = try sema.analyzeLoad(block, src, operand, operand_src); const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1721,17 +1723,12 @@ fn analyzeBodyInner( } pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { - var i: usize = @enumToInt(zir_ref); - + const i = @enumToInt(zir_ref); // First section of indexes correspond to a 
set number of constant values. - if (i < Zir.Inst.Ref.typed_value_map.len) { - // We intentionally map the same indexes to the same values between ZIR and AIR. - return zir_ref; - } - i -= Zir.Inst.Ref.typed_value_map.len; - - // Finally, the last section of indexes refers to the map of ZIR=>AIR. - const inst = sema.inst_map.get(@intCast(u32, i)).?; + // We intentionally map the same indexes to the same values between ZIR and AIR. + if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i); + // The last section of indexes refers to the map of ZIR => AIR. + const inst = sema.inst_map.get(i - InternPool.static_len).?; const ty = sema.typeOf(inst); if (ty.tag() == .generic_poison) return error.GenericPoison; return inst; @@ -1766,9 +1763,8 @@ pub fn resolveConstString( } pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - assert(zir_ref != .var_args_param); const air_inst = try sema.resolveInst(zir_ref); - assert(air_inst != .var_args_param); + assert(air_inst != .var_args_param_type); const ty = try sema.analyzeAsType(block, src, air_inst); if (ty.tag() == .generic_poison) return error.GenericPoison; return ty; @@ -1783,8 +1779,7 @@ fn analyzeAsType( const wanted_type = Type.initTag(.type); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - const ty = val.toType(&buffer); + const ty = val.toType(); return ty.copy(sema.arena); } @@ -1950,12 +1945,12 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( make_runtime: *bool, ) CompileError!?Value { // First section of indexes correspond to a set number of constant values. - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val; + const int = @enumToInt(inst); + if (int < InternPool.static_len) { + return @intToEnum(InternPool.Index, int).toValue(); } - i -= Air.Inst.Ref.typed_value_map.len; + const i = int - InternPool.static_len; const air_tags = sema.air_instructions.items(.tag); if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| { if (air_tags[i] == .constant) { @@ -2010,13 +2005,14 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, opt } fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError { + const mod = sema.mod; const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{ - ty.fmt(sema.mod), + ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (ty.isSlice()) { - try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2().fmt(sema.mod)}); + try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)}); } break :msg msg; }; @@ -2042,7 +2038,8 @@ fn failWithErrorSetCodeMissing( } fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError { - if (int_ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (int_ty.zigTypeTag(mod) == .Vector) { const msg = msg: { const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{ int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod), @@ -2084,12 +2081,13 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError } fn 
failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { + const mod = sema.mod; const inner_ty = if (object_ty.isSinglePointer()) object_ty.childType() else object_ty; - if (inner_ty.zigTypeTag() == .Optional) opt: { + if (inner_ty.zigTypeTag(mod) == .Optional) opt: { var buf: Type.Payload.ElemType = undefined; const child_ty = inner_ty.optionalChild(&buf); - if (!typeSupportsFieldAccess(child_ty, field_name)) break :opt; + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2097,9 +2095,9 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (inner_ty.zigTypeTag() == .ErrorUnion) err: { + } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: { const child_ty = inner_ty.errorUnionPayload(); - if (!typeSupportsFieldAccess(child_ty, field_name)) break :err; + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err; const msg = msg: { const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2111,14 +2109,14 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); } -fn typeSupportsFieldAccess(ty: Type, field_name: []const u8) bool { - switch (ty.zigTypeTag()) { +fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) bool { + switch (ty.zigTypeTag(mod)) { .Array => return mem.eql(u8, field_name, "len"), .Pointer => { const ptr_info = ty.ptrInfo().data; if (ptr_info.size == .Slice) { return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); - } else if (ptr_info.pointee_type.zigTypeTag() == .Array) { + } else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { return mem.eql(u8, field_name, "len"); } else return false; }, @@ -2352,10 +2350,10 @@ fn analyzeAsInt( dest_ty: Type, reason: []const u8, ) !u64 { + const mod = sema.mod; const coerced = try sema.coerce(block, dest_ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced, reason); - const target = sema.mod.getTarget(); - return (try val.getUnsignedIntAdvanced(target, sema)).?; + return (try val.getUnsignedIntAdvanced(mod, sema)).?; } // Returns a compile error if the value has tag `variable`. 
See `resolveInstValue` for @@ -2926,23 +2924,23 @@ fn zirEnumDecl( if (tag_type_ref != .none) { const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref); - if (ty.zigTypeTag() != .Int and ty.zigTypeTag() != .ComptimeInt) { + if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); } enum_obj.tag_ty = try ty.copy(decl_arena_allocator); enum_obj.tag_ty_inferred = false; } else if (fields_len == 0) { - enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, 0); + enum_obj.tag_ty = try mod.intType(.unsigned, 0); enum_obj.tag_ty_inferred = true; } else { const bits = std.math.log2_int_ceil(usize, fields_len); - enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, bits); + enum_obj.tag_ty = try mod.intType(.unsigned, bits); enum_obj.tag_ty_inferred = true; } } - if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag() != .ComptimeInt) { - if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(sema.mod.getTarget())) { + if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag(mod) != .ComptimeInt) { + if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(mod)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } } @@ -3319,7 +3317,8 @@ fn ensureResultUsed( ty: Type, src: LazySrcLoc, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return, .ErrorSet, .ErrorUnion => { const msg = msg: { @@ -3347,11 +3346,12 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const src = inst_data.src(); const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion => { const msg = msg: { const msg = try sema.errMsg(block, src, "error is discarded", .{}); @@ -3369,16 +3369,17 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const err_union_ty = if (operand_ty.zigTypeTag() == .Pointer) + const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer) operand_ty.childType() else operand_ty; - if (err_union_ty.zigTypeTag() != .ErrorUnion) return; - const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(); + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; + const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(mod); if (payload_ty != .Void and payload_ty != .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "error union payload is ignored", .{}); @@ -3920,19 +3921,20 @@ fn zirArrayBasePtr( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) { + while (true) 
switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; const elem_ty = sema.typeOf(base_ptr).childType(); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, .Struct => if (elem_ty.isTuple()) { // TODO validate element count @@ -3948,19 +3950,20 @@ fn zirFieldBasePtr( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) { + while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; const elem_ty = sema.typeOf(base_ptr).childType(); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Struct, .Union => return base_ptr, else => {}, } @@ -3968,6 +3971,7 @@ fn zirFieldBasePtr( } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); @@ -3991,7 +3995,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. - const is_int = switch (object_ty.zigTypeTag()) { + const is_int = switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => true, else => false, }; @@ -4000,7 +4004,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .input_index = i, } }; const arg_len_uncoerced = if (is_int) object else l: { - if (!object_ty.isIndexable()) { + if (!object_ty.isIndexable(mod)) { // Instead of using checkIndexable we customize this error. const msg = msg: { const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)}); @@ -4010,7 +4014,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. }; return sema.failWithOwnedErrorMsg(msg); } - if (!object_ty.indexableHasLen()) continue; + if (!object_ty.indexableHasLen(mod)) continue; break :l try sema.fieldVal(block, arg_src, object, "len", arg_src); }; @@ -4061,7 +4065,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. 
- switch (object_ty.zigTypeTag()) { + switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => continue, else => {}, } @@ -4096,13 +4100,14 @@ fn validateArrayInitTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data; const ty = try sema.resolveType(block, ty_src, extra.ty); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Array => { const array_len = ty.arrayLen(); if (extra.init_count != array_len) { @@ -4141,11 +4146,12 @@ fn validateStructInitTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct, .Union => return, else => {}, } @@ -4160,6 +4166,7 @@ fn zirValidateStructInit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4168,7 +4175,7 @@ fn zirValidateStructInit( const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); const agg_ty = sema.typeOf(object_ptr).childType(); - switch (agg_ty.zigTypeTag()) { + switch (agg_ty.zigTypeTag(mod)) { .Struct => return sema.validateStructInit( block, agg_ty, @@ -4589,6 +4596,7 @@ fn zirValidateArrayInit( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4599,7 +4607,7 @@ fn zirValidateArrayInit( const array_ty = sema.typeOf(array_ptr).childType(); const array_len = array_ty.arrayLen(); - if (instrs.len != array_len) switch (array_ty.zigTypeTag()) { + if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) { .Struct => { var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); @@ -4667,7 +4675,7 @@ fn zirValidateArrayInit( // Determine whether the value stored to this pointer is comptime-known. 
if (array_ty.isTuple()) { - if (array_ty.structFieldValueComptime(i)) |opv| { + if (array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv; continue; } @@ -4770,12 +4778,13 @@ fn zirValidateArrayInit( } fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - if (operand_ty.zigTypeTag() != .Pointer) { + if (operand_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)}); } else switch (operand_ty.ptrSize()) { .One, .C => {}, @@ -4788,7 +4797,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr return; } - const elem_ty = operand_ty.elemType2(); + const elem_ty = operand_ty.elemType2(mod); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) { return sema.fail(block, src, "cannot dereference undefined value", .{}); @@ -4818,7 +4827,8 @@ fn failWithBadMemberAccess( field_src: LazySrcLoc, field_name: []const u8, ) CompileError { - const kw_name = switch (agg_ty.zigTypeTag()) { + const mod = sema.mod; + const kw_name = switch (agg_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", .Opaque => "opaque", @@ -4894,8 +4904,9 @@ fn failWithBadUnionFieldAccess( } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { - const src_loc = decl_ty.declSrcLocOrNull(sema.mod) orelse return; - const category = switch (decl_ty.zigTypeTag()) { + const mod = sema.mod; + const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return; + const category = switch (decl_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", .Enum => "enum", @@ -4903,7 +4914,7 @@ fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !vo .ErrorSet => "error set", else => unreachable, }; - try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); + try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); } fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -5028,6 +5039,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const zir_tags = sema.code.instructions.items(.tag); const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].pl_node; @@ -5046,9 +5058,9 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v // %b = store(%a, %c) // Where %c is an error union or error set. In such case we need to add // to the current function's inferred error set, if any. 
- if (is_ret and (sema.typeOf(operand).zigTypeTag() == .ErrorUnion or - sema.typeOf(operand).zigTypeTag() == .ErrorSet) and - sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) + if (is_ret and (sema.typeOf(operand).zigTypeTag(mod) == .ErrorUnion or + sema.typeOf(operand).zigTypeTag(mod) == .ErrorSet) and + sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(operand); } @@ -6270,6 +6282,7 @@ fn zirCall( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node }; const call_src = inst_data.src(); @@ -6342,7 +6355,7 @@ fn zirCall( sema.inst_map.putAssumeCapacity(inst, inst: { if (arg_index >= fn_params_len) - break :inst Air.Inst.Ref.var_args_param; + break :inst Air.Inst.Ref.var_args_param_type; if (func_ty_info.param_types[arg_index].tag() == .generic_poison) break :inst Air.Inst.Ref.generic_poison_type; @@ -6352,10 +6365,10 @@ fn zirCall( const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); const resolved_ty = sema.typeOf(resolved); - if (resolved_ty.zigTypeTag() == .NoReturn) { + if (resolved_ty.zigTypeTag(mod) == .NoReturn) { return resolved; } - if (resolved_ty.isError()) { + if (resolved_ty.isError(mod)) { input_is_error = true; } resolved_args[arg_index] = resolved; @@ -6380,7 +6393,7 @@ fn zirCall( // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only // need to clean-up our own trace if we were passed to a non-error-handling expression. - if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError())) { + if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) { const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src); @@ -6417,20 +6430,21 @@ fn checkCallArgumentCount( total_args: usize, member_fn: bool, ) !Type { + const mod = sema.mod; const func_ty = func_ty: { - switch (callee_ty.zigTypeTag()) { + switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { const ptr_info = callee_ty.ptrInfo().data; - if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { + if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, .Optional => { var buf: Type.Payload.ElemType = undefined; const opt_child = callee_ty.optionalChild(&buf); - if (opt_child.zigTypeTag() == .Fn or (opt_child.isSinglePointer() and - opt_child.childType().zigTypeTag() == .Fn)) + if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer() and + opt_child.childType().zigTypeTag(mod) == .Fn)) { const msg = msg: { const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{ @@ -6488,13 +6502,14 @@ fn callBuiltin( modifier: std.builtin.CallModifier, args: []const Air.Inst.Ref, ) !void { + const mod = sema.mod; const callee_ty = sema.typeOf(builtin_fn); const func_ty = func_ty: { - switch (callee_ty.zigTypeTag()) { + switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { const ptr_info = callee_ty.ptrInfo().data; - if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { + if (ptr_info.size == .One and 
ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, @@ -6715,7 +6730,7 @@ fn analyzeCall( @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), else => { - assert(callee_ty.isPtrAtRuntime()); + assert(callee_ty.isPtrAtRuntime(mod)); return sema.fail(block, call_src, "{s} call of function pointer", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }); @@ -6978,7 +6993,7 @@ fn analyzeCall( break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges); }; - if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag() != .NoReturn) { + if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) { try sema.emitDbgInline( block, module_fn, @@ -7068,7 +7083,7 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); try sema.queueFullTypeResolution(func_ty_info.return_type); - if (sema.owner_func != null and func_ty_info.return_type.isError()) { + if (sema.owner_func != null and func_ty_info.return_type.isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7301,8 +7316,9 @@ fn analyzeGenericCallArg( new_fn_info: Type.Payload.Function.Data, runtime_i: *u32, ) !void { + const mod = sema.mod; const is_runtime = comptime_arg.val.tag() == .generic_poison and - comptime_arg.ty.hasRuntimeBits() and + comptime_arg.ty.hasRuntimeBits(mod) and !(try sema.typeRequiresComptime(comptime_arg.ty)); if (is_runtime) { const param_ty = new_fn_info.param_types[runtime_i.*]; @@ -7591,7 +7607,7 @@ fn instantiateGenericCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func != null and new_fn_info.return_type.isError()) { + if (sema.owner_func != null and new_fn_info.return_type.isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7872,8 +7888,9 @@ fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const int_type = sema.code.instructions.items(.data)[inst].int_type; - const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); + const ty = try mod.intType(int_type.signedness, int_type.bit_count); return sema.addType(ty); } @@ -7882,12 +7899,13 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; const child_type = try sema.resolveType(block, operand_src, inst_data.operand); - if (child_type.zigTypeTag() == .Opaque) { + if (child_type.zigTypeTag(mod) == .Opaque) { return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); - } else if (child_type.zigTypeTag() == .Null) { + } else if (child_type.zigTypeTag(mod) == .Null) { return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); } const opt_type = try Type.optional(sema.arena, child_type); @@ -7896,14 +7914,15 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const bin = sema.code.instructions.items(.data)[inst].bin; const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs); - 
assert(indexable_ty.isIndexable()); // validated by a previous instruction - if (indexable_ty.zigTypeTag() == .Struct) { + assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction + if (indexable_ty.zigTypeTag(mod) == .Struct) { const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs)); return sema.addType(elem_type); } else { - const elem_type = indexable_ty.elemType2(); + const elem_type = indexable_ty.elemType2(mod); return sema.addType(elem_type); } } @@ -7960,9 +7979,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void { - if (elem_type.zigTypeTag() == .Opaque) { + const mod = sema.mod; + if (elem_type.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(sema.mod)}); - } else if (elem_type.zigTypeTag() == .NoReturn) { + } else if (elem_type.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{}); } } @@ -7986,6 +8006,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -7993,7 +8014,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const error_set = try sema.resolveType(block, lhs_src, extra.lhs); const payload = try sema.resolveType(block, rhs_src, extra.rhs); - if (error_set.zigTypeTag() != .ErrorSet) { + if (error_set.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{ error_set.fmt(sema.mod), }); @@ -8004,11 +8025,12 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr } fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void { - if (payload_ty.zigTypeTag() == .Opaque) { + const mod = sema.mod; + if (payload_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{ payload_ty.fmt(sema.mod), }); - } else if (payload_ty.zigTypeTag() == .ErrorSet) { + } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) { return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{ payload_ty.fmt(sema.mod), }); @@ -8089,10 +8111,10 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src); - const target = sema.mod.getTarget(); + const mod = sema.mod; if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { - const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(target)); + const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); if (int > sema.mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); const payload = try sema.arena.create(Value.Payload.Error); @@ -8123,6 +8145,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; @@ -8130,7 +8153,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); - if (sema.typeOf(lhs).zigTypeTag() == .Bool and sema.typeOf(rhs).zigTypeTag() == .Bool) { + if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) { const msg = msg: { const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); @@ -8141,9 +8164,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr } const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); - if (lhs_ty.zigTypeTag() != .ErrorSet) + if (lhs_ty.zigTypeTag(mod) != .ErrorSet) return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(sema.mod)}); - if (rhs_ty.zigTypeTag() != .ErrorSet) + if (rhs_ty.zigTypeTag(mod) != .ErrorSet) return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); // Anything merged with anyerror is anyerror. @@ -8184,6 +8207,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -8191,7 +8215,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) { .Enum => operand, .Union => blk: { const union_ty = try sema.resolveTypeFields(operand_ty); @@ -8213,8 +8237,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; const enum_tag_ty = sema.typeOf(enum_tag); - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); + const int_tag_ty = try enum_tag_ty.intTagType().copy(arena); if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { return sema.addConstant(int_tag_ty, opv); @@ -8231,6 +8254,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -8239,15 +8263,14 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); - if (dest_ty.zigTypeTag() != .Enum) { + if (dest_ty.zigTypeTag(mod) != .Enum) { return sema.fail(block, dest_ty_src, "expected enum, found '{}'", 
.{dest_ty.fmt(sema.mod)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); if (try sema.resolveMaybeUndefVal(operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum()) { - var buffer: Type.Payload.Bits = undefined; - const int_tag_ty = dest_ty.intTagType(&buffer); + const int_tag_ty = dest_ty.intTagType(); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { return sema.addConstant(dest_ty, int_val); } @@ -8329,11 +8352,12 @@ fn analyzeOptionalPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const optional_ptr_ty = sema.typeOf(optional_ptr); - assert(optional_ptr_ty.zigTypeTag() == .Pointer); + assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer); const opt_type = optional_ptr_ty.elemType(); - if (opt_type.zigTypeTag() != .Optional) { + if (opt_type.zigTypeTag(mod) != .Optional) { return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)}); } @@ -8361,7 +8385,7 @@ fn analyzeOptionalPayloadPtr( ); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { - if (val.isNull()) { + if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. @@ -8397,11 +8421,12 @@ fn zirOptionalPayload( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const result_ty = switch (operand_ty.zigTypeTag()) { + const result_ty = switch (operand_ty.zigTypeTag(mod)) { .Optional => try operand_ty.optionalChildAlloc(sema.arena), .Pointer => t: { if (operand_ty.ptrSize() != .C) { @@ -8424,7 +8449,7 @@ fn zirOptionalPayload( }; if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.isNull()) { + if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } if (val.castTag(.opt_payload)) |payload| { @@ -8450,12 +8475,13 @@ fn zirErrUnionPayload( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_src = src; const err_union_ty = sema.typeOf(operand); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); @@ -8468,7 +8494,7 @@ fn analyzeErrUnionPayload( block: *Block, src: LazySrcLoc, err_union_ty: Type, - operand: Zir.Inst.Ref, + operand: Air.Inst.Ref, operand_src: LazySrcLoc, safety_check: bool, ) CompileError!Air.Inst.Ref { @@ -8517,10 +8543,11 @@ fn analyzeErrUnionPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - assert(operand_ty.zigTypeTag() == .Pointer); + assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { + if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.elemType().fmt(sema.mod), }); @@ -8594,8 +8621,9 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: 
LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - if (operand_ty.zigTypeTag() != .ErrorUnion) { + if (operand_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.fmt(sema.mod), }); @@ -8617,13 +8645,14 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - assert(operand_ty.zigTypeTag() == .Pointer); + assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { + if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.elemType().fmt(sema.mod), }); @@ -8677,8 +8706,7 @@ fn zirFunc( extra_index += ret_ty_body.len; const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - break :blk try ret_ty_val.toType(&buffer).copy(sema.arena); + break :blk try ret_ty_val.toType().copy(sema.arena); }, }; @@ -8849,6 +8877,7 @@ fn funcCommon( noalias_bits: u32, is_noinline: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset }; const func_src = LazySrcLoc.nodeOffset(src_node_offset); @@ -8890,31 +8919,6 @@ fn funcCommon( const target = sema.mod.getTarget(); const fn_ty: Type = fn_ty: { - // Hot path for some common function types. - // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated. - if (!is_generic and block.params.items.len == 0 and !var_args and !inferred_error_set and - alignment.? == 0 and - address_space.? == target_util.defaultAddressSpace(target, .function) and - section == .default and - !is_noinline) - { - if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Unspecified) { - break :fn_ty Type.initTag(.fn_noreturn_no_args); - } - - if (bare_return_type.zigTypeTag() == .Void and cc.? == .Unspecified) { - break :fn_ty Type.initTag(.fn_void_no_args); - } - - if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Naked) { - break :fn_ty Type.initTag(.fn_naked_noreturn_no_args); - } - - if (bare_return_type.zigTypeTag() == .Void and cc.? == .C) { - break :fn_ty Type.initTag(.fn_ccc_void_no_args); - } - } - // In the case of generic calling convention, or generic alignment, we use // default values which are only meaningful for the generic function, *not* // the instantiation, which can depend on comptime parameters. 
@@ -8985,8 +8989,8 @@ fn funcCommon( }); }; - if (!return_type.isValidReturnType()) { - const opaque_str = if (return_type.zigTypeTag() == .Opaque) "opaque " else ""; + if (!return_type.isValidReturnType(mod)) { + const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{ opaque_str, return_type.fmt(sema.mod), @@ -9201,22 +9205,23 @@ fn analyzeParameter( has_body: bool, is_noalias: bool, ) !void { + const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); comptime_params[i] = param.is_comptime or requires_comptime; const this_generic = param.ty.tag() == .generic_poison; is_generic.* = is_generic.* or this_generic; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (!param.ty.isValidParamType()) { - const opaque_str = if (param.ty.zigTypeTag() == .Opaque) "opaque " else ""; + if (!param.ty.isValidParamType(mod)) { + const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{ - opaque_str, param.ty.fmt(sema.mod), + opaque_str, param.ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); @@ -9228,11 +9233,11 @@ fn analyzeParameter( if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ - param.ty.fmt(sema.mod), @tagName(cc), + param.ty.fmt(mod), @tagName(cc), }); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl), param.ty, .param_ty); try sema.addDeclaredHereNote(msg, param.ty); @@ -9243,11 +9248,11 @@ fn analyzeParameter( if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{ - param.ty.fmt(sema.mod), + param.ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl), param.ty); try sema.addDeclaredHereNote(msg, param.ty); @@ -9256,7 +9261,7 @@ fn analyzeParameter( return sema.failWithOwnedErrorMsg(msg); } if (!sema.is_generic_instantiation and !this_generic and is_noalias and - !(param.ty.zigTypeTag() == .Pointer or param.ty.isPtrLikeOptional())) + !(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod))) { return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{}); } @@ -9472,13 +9477,14 @@ fn analyzeAs( zir_operand: Zir.Inst.Ref, no_cast_to_comptime_int: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const 
operand = try sema.resolveInst(zir_operand); - if (zir_dest_type == .var_args_param) return operand; + if (zir_dest_type == .var_args_param_type) return operand; const dest_ty = sema.resolveType(block, src, zir_dest_type) catch |err| switch (err) { error.GenericPoison => return operand, else => |e| return e, }; - if (dest_ty.zigTypeTag() == .NoReturn) { + if (dest_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "cannot cast to noreturn", .{}); } const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index| @@ -9495,11 +9501,12 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_ty = sema.typeOf(ptr); - if (!ptr_ty.isPtrAtRuntime()) { + if (!ptr_ty.isPtrAtRuntime(mod)) { return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { @@ -9586,25 +9593,25 @@ fn intCast( operand_src: LazySrcLoc, runtime_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); if (try sema.isComptimeKnown(operand)) { return sema.coerce(block, dest_ty, operand, operand_src); - } else if (dest_scalar_ty.zigTypeTag() == .ComptimeInt) { + } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{}); } try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src); - const is_vector = dest_ty.zigTypeTag() == .Vector; + const is_vector = dest_ty.zigTypeTag(mod) == .Vector; if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| { // requirement: intCast(u0, input) iff input == 0 if (runtime_safety and block.wantSafety()) { try sema.requireRuntimeBlock(block, src, operand_src); - const target = sema.mod.getTarget(); - const wanted_info = dest_scalar_ty.intInfo(target); + const wanted_info = dest_scalar_ty.intInfo(mod); const wanted_bits = wanted_info.bits; if (wanted_bits == 0) { @@ -9631,9 +9638,8 @@ fn intCast( try sema.requireRuntimeBlock(block, src, operand_src); if (runtime_safety and block.wantSafety()) { - const target = sema.mod.getTarget(); - const actual_info = operand_scalar_ty.intInfo(target); - const wanted_info = dest_scalar_ty.intInfo(target); + const actual_info = operand_scalar_ty.intInfo(mod); + const wanted_info = dest_scalar_ty.intInfo(mod); const actual_bits = actual_info.bits; const wanted_bits = wanted_info.bits; const actual_value_bits = actual_bits - @boolToInt(actual_info.signedness == .signed); @@ -9642,7 +9648,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, target); + const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, mod); const dest_max_val = if (is_vector) try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) else @@ -9653,7 +9659,7 @@ fn intCast( if (actual_info.signedness == .signed) { // Reinterpret the sign-bit as part of 
the value. This will make // negative differences (`operand` > `dest_max`) appear too big. - const unsigned_operand_ty = try Type.Tag.int_unsigned.create(sema.arena, actual_bits); + const unsigned_operand_ty = try mod.intType(.unsigned, actual_bits); const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff); // If the destination type is signed, then we need to double its @@ -9727,6 +9733,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -9735,7 +9742,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, @@ -9757,7 +9764,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const msg = msg: { const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(sema.mod)}), else => {}, } @@ -9771,7 +9778,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const msg = msg: { const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(sema.mod)}), .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(sema.mod)}), else => {}, @@ -9782,7 +9789,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithOwnedErrorMsg(msg); }, .Struct, .Union => if (dest_ty.containerLayout() == .Auto) { - const container = switch (dest_ty.zigTypeTag()) { + const container = switch (dest_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, @@ -9799,7 +9806,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Vector, => {}, } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, @@ -9821,7 +9828,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const msg = msg: { const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), else => {}, } @@ -9834,7 +9841,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const msg = msg: { const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", 
.{operand_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(sema.mod)}), else => {}, @@ -9845,7 +9852,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithOwnedErrorMsg(msg); }, .Struct, .Union => if (operand_ty.containerLayout() == .Auto) { - const container = switch (operand_ty.zigTypeTag()) { + const container = switch (operand_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, @@ -9869,6 +9876,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -9878,7 +9886,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(extra.rhs); const target = sema.mod.getTarget(); - const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) { + const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) { .ComptimeFloat => true, .Float => false, else => return sema.fail( @@ -9890,7 +9898,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.fail( block, @@ -9944,20 +9952,21 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const elem_index = try sema.resolveInst(extra.rhs); const indexable_ty = sema.typeOf(array_ptr); - if (indexable_ty.zigTypeTag() != .Pointer) { + if (indexable_ty.zigTypeTag(mod) != .Pointer) { const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node }; const msg = msg: { const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{ indexable_ty.fmt(sema.mod), }); errdefer msg.destroy(sema.gpa); - if (indexable_ty.zigTypeTag() == .Array) { + if (indexable_ty.zigTypeTag(mod) == .Array) { try sema.errNote(block, src, msg, "consider using '&' here", .{}); } break :msg msg; @@ -10076,6 +10085,7 @@ fn zirSwitchCapture( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const zir_datas = sema.code.instructions.items(.data); const capture_info = zir_datas[inst].switch_capture; const switch_info = zir_datas[capture_info.switch_inst].pl_node; @@ -10091,7 +10101,7 @@ fn zirSwitchCapture( if (block.inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; - if (operand_ty.zigTypeTag() == .Union) { + if (operand_ty.zigTypeTag(mod) == .Union) { const field_index = @intCast(u32, 
operand_ty.unionTagFieldIndex(item_val, sema.mod).?); const union_obj = operand_ty.cast(Type.Payload.Union).?.data; const field_ty = union_obj.fields.values()[field_index].ty; @@ -10144,7 +10154,7 @@ fn zirSwitchCapture( return operand_ptr; } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ErrorSet => if (block.switch_else_err_ty) |some| { return sema.bitCast(block, some, operand, operand_src, null); } else { @@ -10162,7 +10172,7 @@ fn zirSwitchCapture( switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index).item, }; - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Union => { const union_obj = operand_ty.cast(Type.Payload.Union).?.data; const first_item = try sema.resolveInst(items[0]); @@ -10269,6 +10279,7 @@ fn zirSwitchCapture( } fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_tok; const src = inst_data.src(); @@ -10280,7 +10291,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const operand_ptr_ty = sema.typeOf(operand_ptr); const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty; - if (operand_ty.zigTypeTag() != .Union) { + if (operand_ty.zigTypeTag(mod) != .Union) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot capture tag of non-union type '{}'", .{ operand_ty.fmt(sema.mod), @@ -10301,6 +10312,7 @@ fn zirSwitchCond( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; @@ -10311,7 +10323,7 @@ fn zirSwitchCond( operand_ptr; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Type, .Void, .Bool, @@ -10371,6 +10383,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -10415,7 +10428,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const target_ty = sema.typeOf(raw_operand); break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty; }; - const union_originally = maybe_union_ty.zigTypeTag() == .Union; + const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union; // Duplicate checking variables later also used for `inline else`. var seen_enum_fields: []?Module.SwitchProngSrc = &.{}; @@ -10433,7 +10446,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var empty_enum = false; const operand_ty = sema.typeOf(operand); - const err_set = operand_ty.zigTypeTag() == .ErrorSet; + const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet; var else_error_ty: ?Type = null; @@ -10459,10 +10472,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return sema.failWithOwnedErrorMsg(msg); } - const target = sema.mod.getTarget(); - // Validate for duplicate items, missing else prong, and invalid range. 
- switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Union => unreachable, // handled in zirSwitchCond .Enum => { seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); @@ -10774,12 +10785,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } check_range: { - if (operand_ty.zigTypeTag() == .Int) { + if (operand_ty.zigTypeTag(mod) == .Int) { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); - const min_int = try operand_ty.minInt(arena.allocator(), target); - const max_int = try operand_ty.maxInt(arena.allocator(), target); + const min_int = try operand_ty.minInt(arena.allocator(), mod); + const max_int = try operand_ty.maxInt(arena.allocator(), mod); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -11080,7 +11091,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) { return Air.Inst.Ref.unreachable_value; } - if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag() == .Enum and + if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { try sema.zirDbgStmt(block, cond_dbg_node_index); @@ -11135,7 +11146,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); - break :blk field_ty.zigTypeTag() != .NoReturn; + break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) { @@ -11242,7 +11253,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); - break :blk field_ty.zigTypeTag() != .NoReturn; + break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) { @@ -11286,7 +11297,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); - if (field_ty.zigTypeTag() != .NoReturn) break true; + if (field_ty.zigTypeTag(mod) != .NoReturn) break true; } else false else true; @@ -11409,7 +11420,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var final_else_body: []const Air.Inst.Index = &.{}; if (special.body.len != 0 or !is_first or case_block.wantSafety()) { var emit_bb = false; - if (special.is_inline) switch (operand_ty.zigTypeTag()) { + if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) { .Enum => { if (operand_ty.isNonexhaustiveEnum() and !union_originally) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ @@ -11429,7 +11440,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError const analyze_body = if (union_originally) blk: { const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); - break :blk field_ty.zigTypeTag() != .NoReturn; + break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src); @@ -11551,7 +11562,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError case_block.inline_case_capture = .none; if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and - operand_ty.zigTypeTag() == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) + operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { try sema.zirDbgStmt(&case_block, cond_dbg_node_index); const ok = try case_block.addUnOp(.is_named_enum_value, operand); @@ -11563,7 +11574,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (seen_field != null) continue; const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data; const field_ty = union_obj.fields.values()[index].ty; - if (field_ty.zigTypeTag() != .NoReturn) break true; + if (field_ty.zigTypeTag(mod) != .NoReturn) break true; } else false else true; @@ -11629,9 +11640,9 @@ const RangeSetUnhandledIterator = struct { first: bool = true, fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { - const target = sema.mod.getTarget(); - const min = try ty.minInt(sema.arena, target); - const max = try ty.maxInt(sema.arena, target); + const mod = sema.mod; + const min = try ty.minInt(sema.arena, mod); + const max = try ty.maxInt(sema.arena, mod); return RangeSetUnhandledIterator{ .sema = sema, @@ -11931,18 +11942,19 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op } fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void { + const mod = sema.mod; const index = Zir.refToIndex(cond) orelse return; if (sema.code.instructions.items(.tag)[index] != .is_non_err) return; const err_inst_data = sema.code.instructions.items(.data)[index].un_node; const err_operand = try sema.resolveInst(err_inst_data.operand); const operand_ty = sema.typeOf(err_operand); - if (operand_ty.zigTypeTag() == .ErrorSet) { + if (operand_ty.zigTypeTag(mod) == .ErrorSet) { try sema.maybeErrorUnwrapComptime(block, body, err_operand); return; } if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| { - if (!operand_ty.isError()) return; + if (!operand_ty.isError(mod)) return; if (val.getError() == null) return; try sema.maybeErrorUnwrapComptime(block, body, err_operand); } @@ -11972,6 +11984,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I } fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -11995,7 +12008,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; break :hf field_index < ty.structFieldCount(); } - break :hf switch (ty.zigTypeTag()) { + break :hf switch (ty.zigTypeTag(mod)) { .Struct => 
ty.structFields().contains(field_name), .Union => ty.unionFields().contains(field_name), .Enum => ty.enumFields().contains(field_name), @@ -12126,6 +12139,7 @@ fn zirShl( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); sema.src = src; @@ -12136,11 +12150,10 @@ fn zirShl( const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const target = sema.mod.getTarget(); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - const scalar_ty = lhs_ty.scalarType(); - const scalar_rhs_ty = rhs_ty.scalarType(); + const scalar_ty = lhs_ty.scalarType(mod); + const scalar_rhs_ty = rhs_ty.scalarType(mod); // TODO coerce rhs if air_tag is not shl_sat const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty); @@ -12156,18 +12169,18 @@ fn zirShl( if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { return lhs; } - if (scalar_ty.zigTypeTag() != .ComptimeInt and air_tag != .shl_sat) { + if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) { var bits_payload = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, - .data = scalar_ty.intInfo(target).bits, + .data = scalar_ty.intInfo(mod).bits, }; const bit_value = Value.initPayload(&bits_payload.base); - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); - if (rhs_elem.compareHetero(.gte, bit_value, target)) { + if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, @@ -12175,26 +12188,26 @@ fn zirShl( }); } } - } else if (rhs_val.compareHetero(.gte, bit_value, target)) { + } else if (rhs_val.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), scalar_ty.fmt(sema.mod), }); } } - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); - if (rhs_elem.compareHetero(.lt, Value.zero, target)) { + if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, target)) { + } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -12204,7 +12217,7 @@ fn zirShl( const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); const rhs_val = maybe_rhs_val orelse { - if (scalar_ty.zigTypeTag() == .ComptimeInt) { + if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); } break :rs rhs_src; @@ -12213,7 +12226,7 @@ fn zirShl( const val = switch (air_tag) { .shl_exact => val: { const shifted = try 
lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod); - if (scalar_ty.zigTypeTag() == .ComptimeInt) { + if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { break :val shifted.wrapped_result; } if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) { @@ -12222,12 +12235,12 @@ fn zirShl( return sema.fail(block, src, "operation caused overflow", .{}); }, - .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt) + .shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod) else try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod), - .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt) + .shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod) else try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod), @@ -12241,11 +12254,11 @@ fn zirShl( const new_rhs = if (air_tag == .shl_sat) rhs: { // Limit the RHS type for saturating shl to be an integer as small as the LHS. if (rhs_is_comptime_int or - scalar_rhs_ty.intInfo(target).bits > scalar_ty.intInfo(target).bits) + scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits) { const max_int = try sema.addConstant( lhs_ty, - try lhs_ty.maxInt(sema.arena, target), + try lhs_ty.maxInt(sema.arena, mod), ); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); @@ -12256,11 +12269,11 @@ fn zirShl( try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { - const bit_count = scalar_ty.intInfo(target).bits; + const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count); - const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: { + const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ @@ -12290,7 +12303,7 @@ fn zirShl( } }, }); const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty); - const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector) + const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector) try block.addInst(.{ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ @@ -12319,6 +12332,7 @@ fn zirShr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); sema.src = src; @@ -12330,8 +12344,7 @@ fn zirShr( const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - const target = sema.mod.getTarget(); - const scalar_ty = lhs_ty.scalarType(); + const scalar_ty = lhs_ty.scalarType(mod); const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs); @@ -12344,18 +12357,18 @@ fn zirShr( if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { return lhs; } - if (scalar_ty.zigTypeTag() != .ComptimeInt) { + if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) { var bits_payload = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, - .data = scalar_ty.intInfo(target).bits, + .data = scalar_ty.intInfo(mod).bits, }; const bit_value = 
Value.initPayload(&bits_payload.base); - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); - if (rhs_elem.compareHetero(.gte, bit_value, target)) { + if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, @@ -12363,26 +12376,26 @@ fn zirShr( }); } } - } else if (rhs_val.compareHetero(.gte, bit_value, target)) { + } else if (rhs_val.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), scalar_ty.fmt(sema.mod), }); } } - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen()) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); - if (rhs_elem.compareHetero(.lt, Value.zero, target)) { + if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, target)) { + } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -12405,18 +12418,18 @@ fn zirShr( } } else rhs_src; - if (maybe_rhs_val == null and scalar_ty.zigTypeTag() == .ComptimeInt) { + if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); } try sema.requireRuntimeBlock(block, src, runtime_src); const result = try block.addBinOp(air_tag, lhs, rhs); if (block.wantSafety()) { - const bit_count = scalar_ty.intInfo(target).bits; + const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count); - const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: { + const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ @@ -12436,7 +12449,7 @@ fn zirShr( if (air_tag == .shr_exact) { const back = try block.addBinOp(.shl, result, rhs); - const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: { + const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const eql = try block.addCmpVector(lhs, back, .eq); break :ok try block.addInst(.{ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce, @@ -12461,6 +12474,7 @@ fn zirBitwise( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -12475,8 +12489,8 @@ fn zirBitwise( const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); - const scalar_type = 
resolved_type.scalarType(); - const scalar_tag = scalar_type.zigTypeTag(); + const scalar_type = resolved_type.scalarType(mod); + const scalar_tag = scalar_type.zigTypeTag(mod); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -12484,7 +12498,7 @@ fn zirBitwise( const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; if (!is_int) { - return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); + return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) }); } const runtime_src = runtime: { @@ -12515,15 +12529,16 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_type = sema.typeOf(operand); - const scalar_type = operand_type.scalarType(); + const scalar_type = operand_type.scalarType(mod); - if (scalar_type.zigTypeTag() != .Int) { + if (scalar_type.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{ operand_type.fmt(sema.mod), }); @@ -12532,7 +12547,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) { return sema.addConstUndef(operand_type); - } else if (operand_type.zigTypeTag() == .Vector) { + } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen()); var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); @@ -12728,18 +12743,19 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod); + const mod = sema.mod; const ptr_addrspace = p: { - if (lhs_ty.zigTypeTag() == .Pointer) break :p lhs_ty.ptrAddressSpace(); - if (rhs_ty.zigTypeTag() == .Pointer) break :p rhs_ty.ptrAddressSpace(); + if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(); + if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(); break :p null; }; - const runtime_src = if (switch (lhs_ty.zigTypeTag()) { + const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) { .Array, .Struct => try sema.resolveMaybeUndefVal(lhs), .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs), else => unreachable, }) |lhs_val| rs: { - if (switch (rhs_ty.zigTypeTag()) { + if (switch (rhs_ty.zigTypeTag(mod)) { .Array, .Struct => try sema.resolveMaybeUndefVal(rhs), .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs), else => unreachable, @@ -12841,8 +12857,9 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Array => return 
operand_ty.arrayInfo(), .Pointer => { const ptr_info = operand_ty.ptrInfo().data; @@ -12859,7 +12876,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins }; }, .One => { - if (ptr_info.pointee_type.zigTypeTag() == .Array) { + if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { return ptr_info.pointee_type.arrayInfo(); } }, @@ -12867,10 +12884,10 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins } }, .Struct => { - if (operand_ty.isTuple() and peer_ty.isIndexable()) { + if (operand_ty.isTuple() and peer_ty.isIndexable(mod)) { assert(!peer_ty.isTuple()); return .{ - .elem_type = peer_ty.elemType2(), + .elem_type = peer_ty.elemType2(mod), .sentinel = null, .len = operand_ty.arrayLen(), }; @@ -12970,11 +12987,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } // Analyze the lhs first, to catch the case that someone tried to do exponentiation + const mod = sema.mod; const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse { const msg = msg: { const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => { try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{}); }, @@ -12994,7 +13012,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod); - const ptr_addrspace = if (lhs_ty.zigTypeTag() == .Pointer) lhs_ty.ptrAddressSpace() else null; + const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace() else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { @@ -13082,6 +13100,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const lhs_src = src; @@ -13089,9 +13108,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const rhs = try sema.resolveInst(inst_data.operand); const rhs_ty = sema.typeOf(rhs); - const rhs_scalar_ty = rhs_ty.scalarType(); + const rhs_scalar_ty = rhs_ty.scalarType(mod); - if (rhs_scalar_ty.isUnsignedInt() or switch (rhs_scalar_ty.zigTypeTag()) { + if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) { .Int, .ComptimeInt, .Float, .ComptimeFloat => false, else => true, }) { @@ -13108,7 +13127,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs); } - const lhs = if (rhs_ty.zigTypeTag() == .Vector) + const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) else try sema.resolveInst(.zero); @@ -13117,6 +13136,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const lhs_src = src; @@ -13124,14 +13144,14 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const rhs = try sema.resolveInst(inst_data.operand); const rhs_ty = sema.typeOf(rhs); - const rhs_scalar_ty = rhs_ty.scalarType(); + const rhs_scalar_ty = rhs_ty.scalarType(mod); - switch (rhs_scalar_ty.zigTypeTag()) { + switch (rhs_scalar_ty.zigTypeTag(mod)) { .Int, .ComptimeInt, .Float, .ComptimeFloat => {}, else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}), } - const lhs = if (rhs_ty.zigTypeTag() == .Vector) + const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) else try sema.resolveInst(.zero); @@ -13161,6 +13181,7 @@ fn zirArithmetic( } fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13171,8 +13192,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13181,25 +13202,24 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; + const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); - if ((lhs_ty.zigTypeTag() == .ComptimeFloat and rhs_ty.zigTypeTag() == .ComptimeInt) or - (lhs_ty.zigTypeTag() == .ComptimeInt and rhs_ty.zigTypeTag() == .ComptimeFloat)) + if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or + (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat)) { // If it makes a difference whether we coerce to ints or floats before doing the division, error. // If lhs % rhs is 0, it doesn't matter. 
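Note: the hunks above and below repeat one mechanical refactor: Type and Value
queries such as zigTypeTag, zigTypeTagOrPoison, scalarType, intInfo, isSignedInt,
minInt, and maxInt now take the Module (bound locally as `const mod = sema.mod;`)
instead of a std.Target, so each query can reach the InternPool. A minimal,
self-contained sketch of that refactor shape, using hypothetical Ctx/Ty stand-ins
rather than the compiler's real Module/Type:

const std = @import("std");

/// Hypothetical stand-in for Module: owns the interned per-type data.
const Ctx = struct { bit_widths: []const u16 };

/// Hypothetical stand-in for Type: just an index into the owning context.
const Ty = struct {
    index: usize,

    // Before this patch a query like this would have taken a std.Target;
    // now it takes the owning context, mirroring `ty.intInfo(mod)` above.
    fn bitSize(ty: Ty, ctx: *const Ctx) u16 {
        return ctx.bit_widths[ty.index];
    }
};

pub fn main() void {
    const ctx = Ctx{ .bit_widths = &.{ 8, 16, 32 } };
    const ty = Ty{ .index = 2 };
    // Callers thread the context through every query, just as Sema now
    // threads `mod` where it previously threaded `target`.
    std.debug.print("bits = {d}\n", .{ty.bitSize(&ctx)});
}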
@@ -13268,7 +13288,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const runtime_src = rs: { if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { + if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { return sema.addConstUndef(resolved_type); @@ -13309,7 +13329,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } const air_tag = if (is_int) blk: { - if (lhs_ty.isSignedInt() or rhs_ty.isSignedInt()) { + if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) { return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) }); } break :blk Air.Inst.Tag.div_trunc; @@ -13321,6 +13341,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13331,8 +13352,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13341,19 +13362,18 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; + const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13437,7 +13457,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ok = if (!is_int) ok: { const floored = try block.addUnOp(.floor, result); - if (resolved_type.zigTypeTag() == .Vector) { + if (resolved_type.zigTypeTag(mod) == .Vector) { const eql = try block.addCmpVector(result, floored, .eq); break :ok try block.addInst(.{ .tag = switch (block.float_mode) { @@ -13459,7 +13479,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else ok: { const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); - if (resolved_type.zigTypeTag() == .Vector) { + if 
(resolved_type.zigTypeTag(mod) == .Vector) { const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); @@ -13484,6 +13504,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13494,8 +13515,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13504,20 +13525,19 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; + const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13562,7 +13582,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { + if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { return sema.addConstUndef(resolved_type); @@ -13600,6 +13620,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13610,8 +13631,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try 
rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13620,20 +13641,19 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; + const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13677,7 +13697,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { - if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) { + if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { return sema.addConstUndef(resolved_type); @@ -13727,22 +13747,20 @@ fn addDivIntOverflowSafety( casted_rhs: Air.Inst.Ref, is_int: bool, ) CompileError!void { + const mod = sema.mod; if (!is_int) return; // If the LHS is unsigned, it cannot cause overflow. - if (!lhs_scalar_ty.isSignedInt()) return; - - const mod = sema.mod; - const target = mod.getTarget(); + if (!lhs_scalar_ty.isSignedInt(mod)) return; // If the LHS is widened to a larger integer type, no overflow is possible. - if (lhs_scalar_ty.intInfo(target).bits < resolved_type.intInfo(target).bits) { + if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) { return; } - const min_int = try resolved_type.minInt(sema.arena, target); + const min_int = try resolved_type.minInt(sema.arena, mod); const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1); - const neg_one = if (resolved_type.zigTypeTag() == .Vector) + const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector) try Value.Tag.repeated.create(sema.arena, neg_one_scalar) else neg_one_scalar; @@ -13759,7 +13777,7 @@ fn addDivIntOverflowSafety( } var ok: Air.Inst.Ref = .none; - if (resolved_type.zigTypeTag() == .Vector) { + if (resolved_type.zigTypeTag(mod) == .Vector) { if (maybe_lhs_val == null) { const min_int_ref = try sema.addConstant(resolved_type, min_int); ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq); @@ -13815,7 +13833,8 @@ fn addDivByZeroSafety( // emitted above. 
if (maybe_rhs_val != null) return; - const ok = if (resolved_type.zigTypeTag() == .Vector) ok: { + const mod = sema.mod; + const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); @@ -13842,6 +13861,7 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst } fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -13852,8 +13872,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -13862,20 +13882,19 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; + const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -13904,7 +13923,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } else Value.zero; return sema.addConstant(resolved_type, zero_val); } - } else if (lhs_scalar_ty.isSignedInt()) { + } else if (lhs_scalar_ty.isSignedInt(mod)) { return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } if (maybe_rhs_val) |rhs_val| { @@ -13929,7 +13948,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.addConstant(resolved_type, rem_result); } break :rs lhs_src; - } else if (rhs_scalar_ty.isSignedInt()) { + } else if (rhs_scalar_ty.isSignedInt(mod)) { return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); } else { break :rs rhs_src; @@ -13978,7 +13997,8 @@ fn intRem( lhs: Value, rhs: Value, ) CompileError!Value { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; @@ -13997,13 +14017,13 @@ fn intRemScalar( lhs: Value, rhs: Value, ) CompileError!Value { - const target = sema.mod.getTarget(); + const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs_q = try sema.arena.alloc( math.big.Limb, lhs_bigint.limbs.len, @@ -14025,6 +14045,7 @@ fn intRemScalar( } fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -14035,8 +14056,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -14048,13 +14069,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -14127,6 +14147,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; sema.src = src; @@ -14137,8 +14158,8 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const rhs = try sema.resolveInst(extra.rhs); const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); try sema.checkInvalidPtrArithmetic(block, src, lhs_ty); @@ -14150,13 +14171,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const 
casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); @@ -14268,7 +14288,7 @@ fn zirOverflowArithmetic( const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src); const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src); - if (dest_ty.scalarType().zigTypeTag() != .Int) { + if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) { return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)}); } @@ -14434,12 +14454,14 @@ fn zirOverflowArithmetic( } fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value { - if (ty.zigTypeTag() != .Vector) return val; + const mod = sema.mod; + if (ty.zigTypeTag(mod) != .Vector) return val; return Value.Tag.repeated.create(sema.arena, val); } fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { - const ov_ty = if (ty.zigTypeTag() == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1; + const mod = sema.mod; + const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1; const types = try sema.arena.alloc(Type, 2); const values = try sema.arena.alloc(Value, 2); @@ -14468,10 +14490,11 @@ fn analyzeArithmetic( rhs_src: LazySrcLoc, want_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) { @@ -14491,18 +14514,17 @@ fn analyzeArithmetic( .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag() == .Vector; + const is_vector = resolved_type.zigTypeTag(mod) == .Vector; const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_tag = resolved_type.scalarType().zigTypeTag(); + const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag); - const mod = sema.mod; const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs); const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs); const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: { @@ -14910,7 +14932,7 @@ fn analyzeArithmetic( } }, }); const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty); - const any_ov_bit = if (resolved_type.zigTypeTag() == .Vector) + const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector) try block.addInst(.{ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ @@ -14944,12 +14966,12 @@ fn 
analyzePtrArithmetic( // TODO if the operand is comptime-known to be negative, or is a negative int, // coerce to isize instead of usize. const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src); - const target = sema.mod.getTarget(); + const mod = sema.mod; const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr); const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset); const ptr_ty = sema.typeOf(ptr); const ptr_info = ptr_ty.ptrInfo().data; - const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Array) + const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array) ptr_info.pointee_type.childType() else ptr_info.pointee_type; @@ -14963,9 +14985,9 @@ fn analyzePtrArithmetic( } // If the addend is not a comptime-known value we can still count on // it being a multiple of the type size. - const elem_size = elem_ty.abiSize(target); + const elem_size = elem_ty.abiSize(mod); const addend = if (opt_off_val) |off_val| a: { - const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(target)); + const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod)); break :a elem_size * off_int; } else elem_size; @@ -14991,10 +15013,10 @@ fn analyzePtrArithmetic( if (opt_off_val) |offset_val| { if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty); - const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target)); + const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod)); if (offset_int == 0) return ptr; - if (try ptr_val.getUnsignedIntAdvanced(target, sema)) |addr| { - const elem_size = elem_ty.abiSize(target); + if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| { + const elem_size = elem_ty.abiSize(mod); const new_addr = switch (air_tag) { .ptr_add => addr + elem_size * offset_int, .ptr_sub => addr - elem_size * offset_int, @@ -15116,6 +15138,7 @@ fn zirAsm( const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len); const inputs = try sema.arena.alloc(ConstraintName, inputs_len); + const mod = sema.mod; for (args, 0..) 
|*arg, arg_i| { const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i); @@ -15123,7 +15146,7 @@ fn zirAsm( const uncasted_arg = try sema.resolveInst(input.data.operand); const uncasted_arg_ty = sema.typeOf(uncasted_arg); - switch (uncasted_arg_ty.zigTypeTag()) { + switch (uncasted_arg_ty.zigTypeTag(mod)) { .ComptimeInt => arg.* = try sema.coerce(block, Type.initTag(.usize), uncasted_arg, src), .ComptimeFloat => arg.* = try sema.coerce(block, Type.initTag(.f64), uncasted_arg, src), else => { @@ -15205,6 +15228,7 @@ fn zirCmpEq( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = inst_data.src(); @@ -15215,8 +15239,8 @@ fn zirCmpEq( const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_ty_tag = lhs_ty.zigTypeTag(); - const rhs_ty_tag = rhs_ty.zigTypeTag(); + const lhs_ty_tag = lhs_ty.zigTypeTag(mod); + const rhs_ty_tag = rhs_ty.zigTypeTag(mod); if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) { // null == null, null != null if (op == .eq) { @@ -15295,6 +15319,7 @@ fn analyzeCmpUnionTag( tag_src: LazySrcLoc, op: std.math.CompareOperator, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const union_ty = try sema.resolveTypeFields(sema.typeOf(un)); const union_tag_ty = union_ty.unionTagType() orelse { const msg = msg: { @@ -15313,7 +15338,7 @@ fn analyzeCmpUnionTag( if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| { if (enum_val.isUndef()) return sema.addConstUndef(Type.bool); const field_ty = union_ty.unionFieldType(enum_val, sema.mod); - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } } @@ -15352,32 +15377,33 @@ fn analyzeCmp( rhs_src: LazySrcLoc, is_equality_cmp: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - if (lhs_ty.zigTypeTag() != .Optional and rhs_ty.zigTypeTag() != .Optional) { + if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) { try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); } - if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) { return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src); } - if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) { + if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) { // This operation allows any combination of integer and float types, regardless of the // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for // numeric types. 
return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src); } - if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorUnion and rhs_ty.zigTypeTag() == .ErrorSet) { + if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) { const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs); return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src); } - if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorSet and rhs_ty.zigTypeTag() == .ErrorUnion) { + if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) { const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs); return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src); } const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); - if (!resolved_type.isSelfComparable(is_equality_cmp)) { + if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) { return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{ compareOperatorName(op), resolved_type.fmt(sema.mod), }); @@ -15408,6 +15434,7 @@ fn cmpSelf( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { @@ -15415,7 +15442,7 @@ fn cmpSelf( if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); - if (resolved_type.zigTypeTag() == .Vector) { + if (resolved_type.zigTypeTag(mod) == .Vector) { const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool); const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type); return sema.addConstant(result_ty, cmp_val); @@ -15427,7 +15454,7 @@ fn cmpSelf( return Air.Inst.Ref.bool_false; } } else { - if (resolved_type.zigTypeTag() == .Bool) { + if (resolved_type.zigTypeTag(mod) == .Bool) { // We can lower bool eq/neq more efficiently. return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src); } @@ -15436,7 +15463,7 @@ fn cmpSelf( } else { // For bools, we still check the other operand, because we can lower // bool eq/neq more efficiently. 
- if (resolved_type.zigTypeTag() == .Bool) { + if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src); @@ -15446,7 +15473,7 @@ fn cmpSelf( } }; try sema.requireRuntimeBlock(block, src, runtime_src); - if (resolved_type.zigTypeTag() == .Vector) { + if (resolved_type.zigTypeTag(mod) == .Vector) { return block.addCmpVector(casted_lhs, casted_rhs, op); } const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized); @@ -15475,10 +15502,11 @@ fn runtimeBoolCmp( } fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Fn, .NoReturn, .Undefined, @@ -15509,8 +15537,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .AnyFrame, => {}, } - const target = sema.mod.getTarget(); - const val = try ty.lazyAbiSize(target, sema.arena); + const val = try ty.lazyAbiSize(mod, sema.arena); if (val.tag() == .lazy_size) { try sema.queueFullTypeResolution(ty); } @@ -15518,10 +15545,11 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Fn, .NoReturn, .Undefined, @@ -15552,8 +15580,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .AnyFrame, => {}, } - const target = sema.mod.getTarget(); - const bit_size = try operand_ty.bitSizeAdvanced(target, sema); + const bit_size = try operand_ty.bitSizeAdvanced(mod, sema); return sema.addIntUnsigned(Type.comptime_int, bit_size); } @@ -15765,13 +15792,13 @@ fn zirBuiltinSrc( } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); const type_info_ty = try sema.getBuiltinType("Type"); - const target = sema.mod.getTarget(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Type => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ @@ -15881,8 +15908,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); try sema.ensureDeclAnalyzed(fn_info_decl_index); const fn_info_decl = sema.mod.declPtr(fn_info_decl_index); - var fn_ty_buffer: Value.ToTypeBuffer = undefined; - const fn_ty = fn_info_decl.val.toType(&fn_ty_buffer); + const fn_ty = fn_info_decl.val.toType(); const param_info_decl_index = (try sema.namespaceLookup( block, src, @@ -15892,8 +15918,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try 
sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); try sema.ensureDeclAnalyzed(param_info_decl_index); const param_info_decl = sema.mod.declPtr(param_info_decl_index); - var param_buffer: Value.ToTypeBuffer = undefined; - const param_ty = param_info_decl.val.toType(¶m_buffer); + const param_ty = param_info_decl.val.toType(); const new_decl = try params_anon_decl.finish( try Type.Tag.array.create(params_anon_decl.arena(), .{ .len = param_vals.len, @@ -15924,7 +15949,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // calling_convention: CallingConvention, try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)), // alignment: comptime_int, - try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)), + try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(mod)), // is_generic: bool, Value.makeBool(info.is_generic), // is_var_args: bool, @@ -15944,7 +15969,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const field_values = try sema.arena.alloc(Value, 2); // signedness: Signedness, field_values[0] = try Value.Tag.enum_field_index.create( @@ -15965,7 +15990,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Float => { const field_values = try sema.arena.alloc(Value, 1); // bits: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target)); + field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(mod)); return sema.addConstant( type_info_ty, @@ -15980,7 +16005,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const alignment = if (info.@"align" != 0) try Value.Tag.int_u64.create(sema.arena, info.@"align") else - try info.pointee_type.lazyAbiAlignment(target, sema.arena); + try info.pointee_type.lazyAbiAlignment(mod, sema.arena); const field_values = try sema.arena.create([8]Value); field_values.* = .{ @@ -16072,8 +16097,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); try sema.ensureDeclAnalyzed(set_field_ty_decl_index); const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try set_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + break :t try set_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; try sema.queueFullTypeResolution(try error_field_ty.copy(sema.arena)); @@ -16164,8 +16188,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Enum => { // TODO: look into memoizing this result. 
- var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try ty.intTagType(&int_tag_type_buffer).copy(sema.arena); + const int_tag_ty = try ty.intTagType().copy(sema.arena); const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum()); @@ -16182,8 +16205,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); const enum_field_ty_decl = sema.mod.declPtr(enum_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try enum_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; const enum_fields = ty.enumFields(); @@ -16275,8 +16297,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try union_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + break :t try union_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; const union_ty = try sema.resolveTypeFields(ty); @@ -16383,8 +16404,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try struct_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + break :t try struct_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout @@ -16430,7 +16450,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_comptime: bool, Value.makeBool(is_comptime), // alignment: comptime_int, - try field_ty.lazyAbiAlignment(target, fields_anon_decl.arena()), + try field_ty.lazyAbiAlignment(mod, fields_anon_decl.arena()), }; struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); } @@ -16463,7 +16483,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else field.default_val; const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); - const alignment = field.alignment(target, layout); + const alignment = field.alignment(mod, layout); struct_field_fields.* = .{ // name: []const u8, @@ -16506,7 +16526,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (layout == .Packed) { const struct_obj = struct_ty.castTag(.@"struct").?.data; assert(struct_obj.haveLayout()); - assert(struct_obj.backing_int_ty.isInt()); + assert(struct_obj.backing_int_ty.isInt(mod)); const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty); break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); } else { @@ -16584,8 +16604,7 @@ fn typeInfoDecls( try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); try sema.ensureDeclAnalyzed(declaration_ty_decl_index); const 
declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try declaration_ty_decl.val.toType(&buffer).copy(decls_anon_decl.arena()); + break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena()); }; try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena)); @@ -16632,8 +16651,7 @@ fn typeInfoNamespaceDecls( if (decl.kind == .@"usingnamespace") { if (decl.analysis == .in_progress) continue; try sema.mod.ensureDeclAnalyzed(decl_index); - var buf: Value.ToTypeBuffer = undefined; - const new_ns = decl.val.toType(&buf).getNamespace().?; + const new_ns = decl.val.toType().getNamespace().?; try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); continue; } @@ -16709,10 +16727,11 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type { - switch (operand.zigTypeTag()) { + const mod = sema.mod; + switch (operand.zigTypeTag(mod)) { .ComptimeInt => return Type.comptime_int, .Int => { - const bits = operand.bitSize(sema.mod.getTarget()); + const bits = operand.bitSize(mod); const count = if (bits == 0) 0 else blk: { @@ -16723,10 +16742,10 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi } break :blk count; }; - return Module.makeIntType(sema.arena, .unsigned, count); + return mod.intType(.unsigned, count); }, .Vector => { - const elem_ty = operand.elemType2(); + const elem_ty = operand.elemType2(mod); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); return Type.Tag.vector.create(sema.arena, .{ .len = operand.vectorLen(), @@ -16920,9 +16939,10 @@ fn finishCondBr( } fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Optional, .Null, .Undefined => return, - .Pointer => if (ty.isPtrLikeOptional()) return, + .Pointer => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.failWithExpectedOptionalType(block, src, ty); @@ -16951,10 +16971,11 @@ fn zirIsNonNullPtr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); - try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2()); + try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod)); if ((try sema.resolveMaybeUndefVal(ptr)) == null) { return block.addUnOp(.is_non_null_ptr, ptr); } @@ -16963,7 +16984,8 @@ fn zirIsNonNullPtr( } fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion, .Undefined => return, else => return sema.fail(block, src, "expected error union type, found '{}'", .{ ty.fmt(sema.mod), @@ -16986,10 +17008,11 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); - try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2()); + try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod)); const loaded = try sema.analyzeLoad(block, 
src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } @@ -17012,6 +17035,7 @@ fn zirCondbr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); @@ -17052,7 +17076,7 @@ fn zirCondbr( const err_inst_data = sema.code.instructions.items(.data)[index].un_node; const err_operand = try sema.resolveInst(err_inst_data.operand); const operand_ty = sema.typeOf(err_operand); - assert(operand_ty.zigTypeTag() == .ErrorUnion); + assert(operand_ty.zigTypeTag(mod) == .ErrorUnion); const result_ty = operand_ty.errorUnionSet(); break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand); }; @@ -17079,7 +17103,7 @@ fn zirCondbr( return always_noreturn; } -fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17087,7 +17111,8 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); @@ -17124,7 +17149,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! 
return try_inst; } -fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17133,7 +17158,8 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand = try sema.resolveInst(extra.data.operand); const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); @@ -17275,16 +17301,17 @@ fn zirRetImplicit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const operand = try sema.resolveInst(inst_data.operand); const r_brace_src = inst_data.src(); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const base_tag = sema.fn_ret_ty.baseZigTypeTag(); + const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod); if (base_tag == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17294,7 +17321,7 @@ fn zirRetImplicit( } else if (base_tag != .Void) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17397,17 +17424,19 @@ fn retWithErrTracing( } fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return false; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.error_return_trace)) return false; - return fn_ret_ty.isError() and - sema.mod.comp.bin_file.options.error_return_tracing; + return fn_ret_ty.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; } fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return; + if (!mod.backendSupportsFeature(.error_return_trace)) return; + if (!mod.comp.bin_file.options.error_return_tracing) return; // This is only relevant at runtime. 
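// Note that `zirTry` and `zirTryPtr` above now return `Air.Inst.Ref` rather
// than `Zir.Inst.Ref`: the two `Ref` enums are separate types from here on,
// so a ZIR reference can no longer be handed back where an AIR reference is
// expected. A sketch of one way two parallel enums can be kept in lockstep
// at compile time (illustrative assumption, not code from this patch):
//
//   comptime {
//       const zir_fields = @typeInfo(Zir.Inst.Ref).Enum.fields;
//       const air_fields = @typeInfo(Air.Inst.Ref).Enum.fields;
//       assert(zir_fields.len == air_fields.len);
//       for (zir_fields, air_fields) |zir_field, air_field| {
//           assert(std.mem.eql(u8, zir_field.name, air_field.name));
//       }
//   }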
if (block.is_comptime or block.is_typeof) return; @@ -17415,7 +17444,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const save_index = inst_data.operand == .none or b: { const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - break :b operand_ty.isError(); + break :b operand_ty.isError(mod); }; if (save_index) @@ -17467,11 +17496,12 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) } fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { - assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion); + const mod = sema.mod; + assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { const op_ty = sema.typeOf(uncasted_operand); - switch (op_ty.zigTypeTag()) { + switch (op_ty.zigTypeTag(mod)) { .ErrorSet => { try payload.data.addErrorSet(sema.gpa, op_ty); }, @@ -17492,7 +17522,8 @@ fn analyzeRet( // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. - if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { + const mod = sema.mod; + if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) { @@ -17540,6 +17571,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node }; @@ -17582,7 +17614,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air break :blk 0; } } - const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); break :blk abi_align; } else 0; @@ -17591,7 +17623,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer); - } else if (elem_ty.zigTypeTag() == .Fn and target.cpu.arch == .avr) .flash else .generic; + } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic; const bit_offset = if (inst_data.flags.has_bit_range) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); @@ -17611,9 +17643,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{}); } - if (elem_ty.zigTypeTag() == .NoReturn) { + if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } @@ -17623,7 +17655,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air { 
return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (inst_data.size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{}); } else if (inst_data.size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { @@ -17639,7 +17671,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{}); } } @@ -17666,8 +17698,9 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const obj_ty = try sema.resolveType(block, src, inst_data.operand); + const mod = sema.mod; - switch (obj_ty.zigTypeTag()) { + switch (obj_ty.zigTypeTag(mod)) { .Struct => return sema.structInitEmpty(block, obj_ty, src, src), .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty), .Void => return sema.addConstant(obj_ty, Value.void), @@ -17696,9 +17729,10 @@ fn structInitEmpty( } fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { + const mod = sema.mod; const arr_len = obj_ty.arrayLen(); if (arr_len != 0) { - if (obj_ty.zigTypeTag() == .Array) { + if (obj_ty.zigTypeTag(mod) == .Array) { return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len}); } else { return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); @@ -17766,13 +17800,14 @@ fn zirStructInit( const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); const src = inst_data.src(); + const mod = sema.mod; const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[first_item.field_type].pl_node; const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; const resolved_ty = try sema.resolveType(block, src, first_field_type_extra.container_type); try sema.resolveTypeLayout(resolved_ty); - if (resolved_ty.zigTypeTag() == .Struct) { + if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. // Maps field index to field_type index of where it was already initialized. 
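// The other pattern running through this file: type queries that used to be
// self-contained, such as `zigTypeTag()`, `elemType2()`, and
// `isPtrLikeOptional()`, now take a `*Module` parameter so they can reach
// the InternPool, and each function hoists `const mod = sema.mod;` once at
// the top. A minimal sketch of the new shape (hypothetical helper, not part
// of the patch):
//
//   fn isSliceLike(sema: *Sema, ty: Type) bool {
//       const mod = sema.mod;
//       return switch (ty.zigTypeTag(mod)) {
//           .Pointer => ty.isSlice(),
//           .Optional => ty.isPtrLikeOptional(mod),
//           else => false,
//       };
//   }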
@@ -17815,7 +17850,7 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(field_index)) |default_value| { + if (!is_packed) if (resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; @@ -17827,7 +17862,7 @@ fn zirStructInit( } return sema.finishStructInit(block, src, src, field_inits, resolved_ty, is_ref); - } else if (resolved_ty.zigTypeTag() == .Union) { + } else if (resolved_ty.zigTypeTag(mod) == .Union) { if (extra.data.fields_len != 1) { return sema.fail(block, src, "union initialization expects exactly one field", .{}); } @@ -18014,6 +18049,7 @@ fn zirStructInitAnon( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); @@ -18050,7 +18086,7 @@ fn zirStructInitAnon( const init = try sema.resolveInst(item.data.init); field_ty.* = sema.typeOf(init); - if (types[i].zigTypeTag() == .Opaque) { + if (types[i].zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); @@ -18148,15 +18184,16 @@ fn zirArrayInit( const array_ty = try sema.resolveType(block, src, args[0]); const sentinel_val = array_ty.sentinel(); + const mod = sema.mod; const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null)); defer gpa.free(resolved_args); for (args[1..], 0..) |arg, i| { const resolved_arg = try sema.resolveInst(arg); - const elem_ty = if (array_ty.zigTypeTag() == .Struct) + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) array_ty.structFieldType(i) else - array_ty.elemType2(); + array_ty.elemType2(mod); resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); @@ -18169,7 +18206,7 @@ fn zirArrayInit( } if (sentinel_val) |some| { - resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some); + resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(mod), some); } const opt_runtime_index: ?u32 = for (resolved_args, 0..) 
|arg, i| { @@ -18227,7 +18264,7 @@ fn zirArrayInit( const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.elemType2(), + .pointee_type = array_ty.elemType2(mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18252,6 +18289,7 @@ fn zirArrayInitAnon( const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const operands = sema.code.refSlice(extra.end, extra.data.operands_len); + const mod = sema.mod; const types = try sema.arena.alloc(Type, operands.len); const values = try sema.arena.alloc(Value, operands.len); @@ -18262,7 +18300,7 @@ fn zirArrayInitAnon( const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); types[i] = sema.typeOf(elem); - if (types[i].zigTypeTag() == .Opaque) { + if (types[i].zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -18379,11 +18417,12 @@ fn fieldType( field_src: LazySrcLoc, ty_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; var cur_ty = aggregate_ty; while (true) { const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; - switch (cur_ty.zigTypeTag()) { + switch (cur_ty.zigTypeTag(mod)) { .Struct => { if (cur_ty.isAnonStruct()) { const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); @@ -18449,14 +18488,14 @@ fn zirFrame( } fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); if (ty.isNoReturn()) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } - const target = sema.mod.getTarget(); - const val = try ty.lazyAbiAlignment(target, sema.arena); + const val = try ty.lazyAbiAlignment(mod, sema.arena); if (val.tag() == .lazy_align) { try sema.queueFullTypeResolution(ty); } @@ -18499,16 +18538,17 @@ fn zirUnaryMath( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, .Vector => { - const scalar_ty = operand_ty.scalarType(); - switch (scalar_ty.zigTypeTag()) { + const scalar_ty = operand_ty.scalarType(mod); + switch (scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}), } @@ -18516,9 +18556,9 @@ fn zirUnaryMath( else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}), } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Vector => { - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const vec_len = 
operand_ty.vectorLen(); const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty); if (try sema.resolveMaybeUndefVal(operand)) |val| { @@ -18564,7 +18604,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mod = sema.mod; try sema.resolveTypeLayout(operand_ty); - const enum_ty = switch (operand_ty.zigTypeTag()) { + const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); const bytes = val.castTag(.enum_literal).?.data; @@ -18654,11 +18694,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const bits_val = struct_val[1]; const signedness = signedness_val.toEnum(std.builtin.Signedness); - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); - const ty = switch (signedness) { - .signed => try Type.Tag.int_signed.create(sema.arena, bits), - .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits), - }; + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const ty = try mod.intType(signedness, bits); return sema.addType(ty); }, .Vector => { @@ -18667,9 +18704,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const len_val = struct_val[0]; const child_val = struct_val[1]; - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = child_val.toType(&buffer); + const len = len_val.toUnsignedInt(mod); + const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); @@ -18682,7 +18718,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // bits: comptime_int, const bits_val = struct_val[0]; - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -18708,10 +18744,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?); - var buffer: Value.ToTypeBuffer = undefined; - const unresolved_elem_ty = child_val.toType(&buffer); + const unresolved_elem_ty = child_val.toType(); const elem_ty = if (abi_align == 0) unresolved_elem_ty else t: { @@ -18723,7 +18758,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size); var actual_sentinel: ?Value = null; - if (!sentinel_val.isNull()) { + if (!sentinel_val.isNull(mod)) { if (ptr_size == .One or ptr_size == .C) { return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); } @@ -18735,9 +18770,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; } - if (elem_ty.zigTypeTag() == .NoReturn) { + if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } @@ -18747,7 +18782,7 @@ fn 
zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in { return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (ptr_size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{}); } else if (ptr_size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { @@ -18763,7 +18798,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "C pointers cannot point to opaque types", .{}); } } @@ -18790,9 +18825,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // sentinel: ?*const anyopaque, const sentinel_val = struct_val[2]; - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); + const len = len_val.toUnsignedInt(mod); + const child_ty = try child_val.toType().copy(sema.arena); const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, @@ -18810,8 +18844,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // child: type, const child_val = struct_val[0]; - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); + const child_ty = try child_val.toType().copy(sema.arena); const ty = try Type.optional(sema.arena, child_ty); return sema.addType(ty); @@ -18824,11 +18857,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // payload: type, const payload_val = struct_val[1]; - var buffer: Value.ToTypeBuffer = undefined; - const error_set_ty = try error_set_val.toType(&buffer).copy(sema.arena); - const payload_ty = try payload_val.toType(&buffer).copy(sema.arena); + const error_set_ty = try error_set_val.toType().copy(sema.arena); + const payload_ty = try payload_val.toType().copy(sema.arena); - if (error_set_ty.zigTypeTag() != .ErrorSet) { + if (error_set_ty.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{}); } @@ -18839,11 +18871,11 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.addType(ty); }, .ErrorSet => { - const payload_val = union_val.val.optionalValue() orelse + const payload_val = union_val.val.optionalValue(mod) orelse return sema.addType(Type.initTag(.anyerror)); const slice_val = payload_val.castTag(.slice).?.data; - const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod.getTarget())); + const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); var names: Module.ErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, len); var i: usize = 0; @@ -18890,7 +18922,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "reified structs must have no decls", .{}); } - if (layout != .Packed and !backing_int_val.isNull()) { + if (layout != .Packed and !backing_int_val.isNull(mod)) { return sema.fail(block, src, "non-packed struct does not support backing integer type", .{}); } @@ -18954,10 +18986,9 @@ fn zirReify(sema: *Sema, 
block: *Block, extended: Zir.Inst.Extended.InstData, in }; // Enum tag type - var buffer: Value.ToTypeBuffer = undefined; - const int_tag_ty = try tag_type_val.toType(&buffer).copy(new_decl_arena_allocator); + const int_tag_ty = try tag_type_val.toType().copy(new_decl_arena_allocator); - if (int_tag_ty.zigTypeTag() != .Int) { + if (int_tag_ty.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); } enum_obj.tag_ty = int_tag_ty; @@ -19090,7 +19121,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const new_decl_arena_allocator = new_decl_arena.allocator(); const union_obj = try new_decl_arena_allocator.create(Module.Union); - const type_tag = if (!tag_type_val.isNull()) + const type_tag = if (!tag_type_val.isNull(mod)) Type.Tag.union_tagged else if (layout != .Auto) Type.Tag.@"union" @@ -19130,11 +19161,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in var tag_ty_field_names: ?Module.EnumFull.NameMap = null; var enum_field_names: ?*Module.EnumNumbered.NameMap = null; const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); - if (tag_type_val.optionalValue()) |payload_val| { - var buffer: Value.ToTypeBuffer = undefined; - union_obj.tag_ty = try payload_val.toType(&buffer).copy(new_decl_arena_allocator); + if (tag_type_val.optionalValue(mod)) |payload_val| { + union_obj.tag_ty = try payload_val.toType().copy(new_decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag() != .Enum) { + if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}); } tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena); @@ -19187,14 +19217,13 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "duplicate union field {s}", .{field_name}); } - var buffer: Value.ToTypeBuffer = undefined; - const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator); + const field_ty = try type_val.toType().copy(new_decl_arena_allocator); gop.value_ptr.* = .{ .ty = field_ty, - .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?), + .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?), }; - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); errdefer msg.destroy(sema.gpa); @@ -19216,7 +19245,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -19280,20 +19309,18 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment = @intCast(u29, alignment_val.toUnsignedInt(target)); + const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod)); if (alignment == 
target_util.defaultFunctionAlignment(target)) { break :alignment 0; } else { break :alignment alignment; } }; - const return_type = return_type_val.optionalValue() orelse + const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); - var buf: Value.ToTypeBuffer = undefined; - const args_slice_val = args_val.castTag(.slice).?.data; - const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod.getTarget())); + const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); const param_types = try sema.arena.alloc(Type, args_len); const comptime_params = try sema.arena.alloc(bool, args_len); @@ -19316,12 +19343,12 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{}); } - const param_type_val = param_type_opt_val.optionalValue() orelse + const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - const param_type = try param_type_val.toType(&buf).copy(sema.arena); + const param_type = try param_type_val.toType().copy(sema.arena); if (arg_is_noalias) { - if (!param_type.isPtrAtRuntime()) { + if (!param_type.isPtrAtRuntime(mod)) { return sema.fail(block, src, "non-pointer parameter declared noalias", .{}); } noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse @@ -19336,7 +19363,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in .param_types = param_types, .comptime_params = comptime_params.ptr, .noalias_bits = noalias_bits, - .return_type = try return_type.toType(&buf).copy(sema.arena), + .return_type = try return_type.toType().copy(sema.arena), .alignment = alignment, .cc = cc, .is_var_args = is_var_args, @@ -19396,8 +19423,6 @@ fn reifyStruct( }, }; - const target = mod.getTarget(); - // Fields const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); @@ -19420,7 +19445,7 @@ fn reifyStruct( if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?); if (layout == .Packed) { if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{}); @@ -19461,7 +19486,7 @@ fn reifyStruct( return sema.fail(block, src, "duplicate struct field {s}", .{field_name}); } - const default_val = if (default_value_val.optionalValue()) |opt_val| blk: { + const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: { const payload_val = if (opt_val.pointerDecl()) |opt_decl| mod.declPtr(opt_decl).val else @@ -19472,8 +19497,7 @@ fn reifyStruct( return sema.fail(block, src, "comptime field without default initialization value", .{}); } - var buffer: Value.ToTypeBuffer = undefined; - const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator); + const field_ty = try type_val.toType().copy(new_decl_arena_allocator); gop.value_ptr.* = .{ .ty = field_ty, .abi_align = abi_align, @@ -19482,7 +19506,7 @@ fn reifyStruct( .offset = undefined, }; - if (field_ty.zigTypeTag() == .Opaque) { + if 
(field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -19492,7 +19516,7 @@ fn reifyStruct( }; return sema.failWithOwnedErrorMsg(msg); } - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{}); errdefer msg.destroy(sema.gpa); @@ -19514,7 +19538,7 @@ fn reifyStruct( break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -19545,20 +19569,15 @@ fn reifyStruct( var fields_bit_sum: u64 = 0; for (struct_obj.fields.values()) |field| { - fields_bit_sum += field.ty.bitSize(target); + fields_bit_sum += field.ty.bitSize(mod); } - if (backing_int_val.optionalValue()) |payload| { - var buf: Value.ToTypeBuffer = undefined; - const backing_int_ty = payload.toType(&buf); + if (backing_int_val.optionalValue(mod)) |payload| { + const backing_int_ty = payload.toType(); try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator); } else { - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, fields_bit_sum), - }; - struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); } struct_obj.status = .have_layout; @@ -19569,6 +19588,7 @@ fn reifyStruct( } fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -19594,7 +19614,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst ptr_info.@"addrspace" = dest_addrspace; const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); - const dest_ty = if (ptr_ty.zigTypeTag() == .Optional) + const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional) try Type.optional(sema.arena, dest_ptr_ty) else dest_ptr_ty; @@ -19716,6 +19736,7 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19730,12 +19751,12 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
if (try sema.resolveMaybeUndefVal(operand)) |val| { const result_val = try sema.floatToInt(block, operand_src, val, operand_ty, dest_ty); return sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeInt) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_int' must be comptime-known"); } try sema.requireRuntimeBlock(block, inst_data.src(), operand_src); - if (dest_ty.intInfo(sema.mod.getTarget()).bits == 0) { + if (dest_ty.intInfo(mod).bits == 0) { if (block.wantSafety()) { const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero)); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); @@ -19755,6 +19776,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19769,7 +19791,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (try sema.resolveMaybeUndefVal(operand)) |val| { const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema); return sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeFloat) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -19778,6 +19800,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -19790,9 +19813,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr_ty = try sema.resolveType(block, src, extra.lhs); try sema.checkPtrType(block, type_src, ptr_ty); - const elem_ty = ptr_ty.elemType2(); - const target = sema.mod.getTarget(); - const ptr_align = try ptr_ty.ptrAlignmentAdvanced(target, sema); + const elem_ty = ptr_ty.elemType2(mod); + const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); if (ptr_ty.isSlice()) { const msg = msg: { @@ -19805,8 +19827,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| { - const addr = val.toUnsignedInt(target); - if (!ptr_ty.isAllowzeroPtr() and addr == 0) + const addr = val.toUnsignedInt(mod); + if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)}); if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0) return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)}); @@ -19820,8 +19842,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } try sema.requireRuntimeBlock(block, src, operand_src); - if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag() == .Fn)) { - if (!ptr_ty.isAllowzeroPtr()) { + if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) { + if (!ptr_ty.isAllowzeroPtr(mod)) { const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); } @@ -19926,6 +19948,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19934,7 +19957,6 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); try sema.checkPtrType(block, dest_ty_src, dest_ty); try sema.checkPtrOperand(block, operand_src, operand_ty); @@ -19982,18 +20004,18 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else operand; - const dest_elem_ty = dest_ty.elemType2(); + const dest_elem_ty = dest_ty.elemType2(mod); try sema.resolveTypeLayout(dest_elem_ty); - const dest_align = dest_ty.ptrAlignment(target); + const dest_align = dest_ty.ptrAlignment(mod); - const operand_elem_ty = operand_ty.elemType2(); + const operand_elem_ty = operand_ty.elemType2(mod); try sema.resolveTypeLayout(operand_elem_ty); - const operand_align = operand_ty.ptrAlignment(target); + const operand_align = operand_ty.ptrAlignment(mod); // If the destination is less aligned than the source, preserve the source alignment 
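// Layout queries migrate the same way: `ptrAlignment`, `abiSize`,
// `abiAlignment`, and `intInfo` now take the `*Module` instead of a bare
// `std.Target`; callers that only held a `target` switch to `mod`, and the
// target itself remains reachable through `mod.getTarget()` when needed.
// Illustrative call shapes, assuming `mod: *Module` is in scope:
//
//   const dest_align = dest_ty.ptrAlignment(mod); // was: ptrAlignment(target)
//   const elem_size = dest_elem_ty.abiSize(mod);  // was: abiSize(target)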
const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: { // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result - if (dest_ty.zigTypeTag() == .Optional) { + if (dest_ty.zigTypeTag(mod) == .Optional) { var buf: Type.Payload.ElemType = undefined; var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data; dest_ptr_info.@"align" = operand_align; @@ -20006,8 +20028,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; if (dest_is_slice) { - const operand_elem_size = operand_elem_ty.abiSize(target); - const dest_elem_size = dest_elem_ty.abiSize(target); + const operand_elem_size = operand_elem_ty.abiSize(mod); + const dest_elem_size = dest_elem_ty.abiSize(mod); if (operand_elem_size != dest_elem_size) { return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{}); } @@ -20032,21 +20054,21 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| { - if (!dest_ty.ptrAllowsZero() and operand_val.isUndef()) { + if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef()) { return sema.failWithUseOfUndef(block, operand_src); } - if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) { + if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } - if (dest_ty.zigTypeTag() == .Optional and sema.typeOf(ptr).zigTypeTag() != .Optional) { + if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val)); } return sema.addConstant(aligned_dest_ty, operand_val); } try sema.requireRuntimeBlock(block, src, null); - if (block.wantSafety() and operand_ty.ptrAllowsZero() and !dest_ty.ptrAllowsZero() and - (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn)) + if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and + (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { const ptr_int = try block.addUnOp(.ptrtoint, ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); @@ -20102,6 +20124,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD } fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -20112,7 +20135,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty); const operand_ty = sema.typeOf(operand); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); - const is_vector = operand_ty.zigTypeTag() == .Vector; + const is_vector = operand_ty.zigTypeTag(mod) == .Vector; const dest_ty = if (is_vector) try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty) else @@ -20122,15 +20145,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.coerce(block, dest_ty, operand, operand_src); } - const target = sema.mod.getTarget(); - 
const dest_info = dest_scalar_ty.intInfo(target); + const dest_info = dest_scalar_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(dest_ty)) |val| { return sema.addConstant(dest_ty, val); } - if (operand_scalar_ty.zigTypeTag() != .ComptimeInt) { - const operand_info = operand_ty.intInfo(target); + if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) { + const operand_info = operand_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return sema.addConstant(operand_ty, val); } @@ -20186,6 +20208,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -20199,12 +20222,12 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var ptr_info = ptr_ty.ptrInfo().data; ptr_info.@"align" = dest_align; var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); - if (ptr_ty.zigTypeTag() == .Optional) { + if (ptr_ty.zigTypeTag(mod) == .Optional) { dest_ty = try Type.Tag.optional.create(sema.arena, dest_ty); } if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| { - if (try val.getUnsignedIntAdvanced(sema.mod.getTarget(), null)) |addr| { + if (try val.getUnsignedIntAdvanced(mod, null)) |addr| { if (addr % dest_align != 0) { return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align }); } @@ -20247,23 +20270,23 @@ fn zirBitCount( block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, - comptime comptimeOp: fn (val: Value, ty: Type, target: std.Target) u64, + comptime comptimeOp: fn (val: Value, ty: Type, mod: *const Module) u64, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); _ = try sema.checkIntOrVector(block, operand, operand_src); - const target = sema.mod.getTarget(); - const bits = operand_ty.intInfo(target).bits; + const bits = operand_ty.intInfo(mod).bits; if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return sema.addConstant(operand_ty, val); } - const result_scalar_ty = try Type.smallestUnsignedInt(sema.arena, bits); - switch (operand_ty.zigTypeTag()) { + const result_scalar_ty = try mod.smallestUnsignedInt(bits); + switch (operand_ty.zigTypeTag(mod)) { .Vector => { const vec_len = operand_ty.vectorLen(); const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty); @@ -20272,10 +20295,10 @@ fn zirBitCount( var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) 
|*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - const count = comptimeOp(elem_val, scalar_ty, target); + const count = comptimeOp(elem_val, scalar_ty, mod); elem.* = try Value.Tag.int_u64.create(sema.arena, count); } return sema.addConstant( @@ -20291,7 +20314,7 @@ fn zirBitCount( if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_scalar_ty); try sema.resolveLazyValue(val); - return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, target)); + return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod)); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_scalar_ty, operand); @@ -20302,14 +20325,14 @@ fn zirBitCount( } fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src); - const target = sema.mod.getTarget(); - const bits = scalar_ty.intInfo(target).bits; + const bits = scalar_ty.intInfo(mod).bits; if (bits % 8 != 0) { return sema.fail( block, @@ -20323,11 +20346,11 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant(operand_ty, val); } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try val.byteSwap(operand_ty, target, sema.arena); + const result_val = try val.byteSwap(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20344,7 +20367,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena); + elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena); } return sema.addConstant( operand_ty, @@ -20371,12 +20394,12 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return sema.addConstant(operand_ty, val); } - const target = sema.mod.getTarget(); - switch (operand_ty.zigTypeTag()) { + const mod = sema.mod; + switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try val.bitReverse(operand_ty, target, sema.arena); + const result_val = try val.bitReverse(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20393,7 +20416,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena); + elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena); } return sema.addConstant( operand_ty, @@ -20429,10 +20452,10 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 const ty = try sema.resolveType(block, lhs_src, extra.lhs); const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known"); - const target = sema.mod.getTarget(); + const mod = sema.mod; try sema.resolveTypeLayout(ty); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => {}, else => { const msg = msg: { @@ -20464,15 +20487,16 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 if (i == field_index) { return bit_sum; } - bit_sum += field.ty.bitSize(target); + bit_sum += field.ty.bitSize(mod); } else unreachable; }, - else => return ty.structFieldOffset(field_index, target) * 8, + else => return ty.structFieldOffset(field_index, mod) * 8, } } fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Struct, .Enum, .Union, .Opaque => return, else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}), } @@ -20480,7 +20504,8 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com /// Returns `true` if the type was a comptime_int. fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { - switch (try ty.zigTypeTagOrPoison()) { + const mod = sema.mod; + switch (try ty.zigTypeTagOrPoison(mod)) { .ComptimeInt => return true, .Int => return false, else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}), @@ -20493,7 +20518,8 @@ fn checkInvalidPtrArithmetic( src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (try ty.zigTypeTagOrPoison()) { + const mod = sema.mod; + switch (try ty.zigTypeTagOrPoison(mod)) { .Pointer => switch (ty.ptrSize()) { .One, .Slice => return, .Many, .C => return sema.fail( @@ -20532,7 +20558,8 @@ fn checkPtrOperand( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => return, .Fn => { const msg = msg: { @@ -20550,7 +20577,7 @@ fn checkPtrOperand( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional()) return, + .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); @@ -20562,7 +20589,8 @@ fn checkPtrType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => return, .Fn => { const msg = msg: { @@ -20580,7 +20608,7 @@ fn checkPtrType( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional()) return, + .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); @@ -20592,9 +20620,10 @@ fn checkVectorElemType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Int, .Float, .Bool => return, - else => if (ty.isPtrAtRuntime()) 
return, + else => if (ty.isPtrAtRuntime(mod)) return, } return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)}); } @@ -20605,7 +20634,8 @@ fn checkFloatType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ComptimeInt, .ComptimeFloat, .Float => {}, else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}), } @@ -20617,9 +20647,10 @@ fn checkNumericType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, @@ -20637,9 +20668,9 @@ fn checkAtomicPtrOperand( ptr_src: LazySrcLoc, ptr_const: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); - var diag: target_util.AtomicPtrAlignmentDiagnostics = .{}; - const alignment = target_util.atomicPtrAlignment(target, elem_ty, &diag) catch |err| switch (err) { + const mod = sema.mod; + var diag: Module.AtomicPtrAlignmentDiagnostics = .{}; + const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) { error.FloatTooBig => return sema.fail( block, elem_ty_src, @@ -20668,7 +20699,7 @@ fn checkAtomicPtrOperand( }; const ptr_ty = sema.typeOf(ptr); - const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison()) { + const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo().data, else => { const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); @@ -20735,12 +20766,13 @@ fn checkIntOrVector( operand: Air.Inst.Ref, operand_src: LazySrcLoc, ) CompileError!Type { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - switch (try operand_ty.zigTypeTagOrPoison()) { + switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int => return operand_ty, .Vector => { const elem_ty = operand_ty.childType(); - switch (try elem_ty.zigTypeTagOrPoison()) { + switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ elem_ty.fmt(sema.mod), @@ -20759,11 +20791,12 @@ fn checkIntOrVectorAllowComptime( operand_ty: Type, operand_src: LazySrcLoc, ) CompileError!Type { - switch (try operand_ty.zigTypeTagOrPoison()) { + const mod = sema.mod; + switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return operand_ty, .Vector => { const elem_ty = operand_ty.childType(); - switch (try elem_ty.zigTypeTagOrPoison()) { + switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ elem_ty.fmt(sema.mod), @@ -20777,7 +20810,8 @@ fn checkIntOrVectorAllowComptime( } fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ErrorSet => return, else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}), } @@ -20805,11 +20839,12 @@ fn checkSimdBinOp( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!SimdBinOp 
{ + const mod = sema.mod; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - var vec_len: ?usize = if (lhs_ty.zigTypeTag() == .Vector) lhs_ty.vectorLen() else null; + var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen() else null; const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); @@ -20823,7 +20858,7 @@ fn checkSimdBinOp( .lhs_val = try sema.resolveMaybeUndefVal(lhs), .rhs_val = try sema.resolveMaybeUndefVal(rhs), .result_ty = result_ty, - .scalar_ty = result_ty.scalarType(), + .scalar_ty = result_ty.scalarType(mod), }; } @@ -20836,8 +20871,9 @@ fn checkVectorizableBinaryOperands( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!void { - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const mod = sema.mod; + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return; const lhs_is_vector = switch (lhs_zig_ty_tag) { @@ -20892,6 +20928,7 @@ fn resolveExportOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExportOptions { + const mod = sema.mod; const export_options_ty = try sema.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -20904,7 +20941,7 @@ fn resolveExportOptions( const name_operand = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); const name_ty = Type.initTag(.const_slice_u8); - const name = try name_val.toAllocatedBytes(name_ty, sema.arena, sema.mod); + const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod); const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known"); @@ -20913,8 +20950,8 @@ fn resolveExportOptions( const section_operand = try sema.fieldVal(block, src, options, "section", section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); const section_ty = Type.initTag(.const_slice_u8); - const section = if (section_opt_val.optionalValue()) |section_val| - try section_val.toAllocatedBytes(section_ty, sema.arena, sema.mod) + const section = if (section_opt_val.optionalValue(mod)) |section_val| + try section_val.toAllocatedBytes(section_ty, sema.arena, mod) else null; @@ -20979,6 +21016,7 @@ fn zirCmpxchg( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data; const air_tag: Air.Inst.Tag = switch (extended.small) { 0 => .cmpxchg_weak, @@ -20996,7 +21034,7 @@ fn zirCmpxchg( // zig fmt: on const expected_value = try sema.resolveInst(extra.expected_value); const elem_ty = sema.typeOf(expected_value); - if (elem_ty.zigTypeTag() == .Float) { + if (elem_ty.zigTypeTag(mod) == .Float) { return sema.fail( block, elem_ty_src, @@ -21102,26 +21140,26 @@ fn zirReduce(sema: *Sema, 
block: *Block, inst: Zir.Inst.Index) CompileError!Air. const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", "@reduce operation must be comptime-known"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); + const mod = sema.mod; - if (operand_ty.zigTypeTag() != .Vector) { - return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(sema.mod)}); + if (operand_ty.zigTypeTag(mod) != .Vector) { + return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)}); } const scalar_ty = operand_ty.childType(); // Type-check depending on operation. switch (operation) { - .And, .Or, .Xor => switch (scalar_ty.zigTypeTag()) { + .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Bool => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{ - @tagName(operation), operand_ty.fmt(sema.mod), + @tagName(operation), operand_ty.fmt(mod), }), }, - .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag()) { + .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Float => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{ - @tagName(operation), operand_ty.fmt(sema.mod), + @tagName(operation), operand_ty.fmt(mod), }), }, } @@ -21136,19 +21174,19 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty); - var accum: Value = try operand_val.elemValue(sema.mod, sema.arena, 0); + var accum: Value = try operand_val.elemValue(mod, sema.arena, 0); var elem_buf: Value.ElemValueBuffer = undefined; var i: u32 = 1; while (i < vec_len) : (i += 1) { - const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf); + const elem_val = operand_val.elemValueBuffer(mod, i, &elem_buf); switch (operation) { - .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod), - .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod), - .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod), - .Min => accum = accum.numberMin(elem_val, target), - .Max => accum = accum.numberMax(elem_val, target), + .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod), + .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod), + .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod), + .Min => accum = accum.numberMin(elem_val, mod), + .Max => accum = accum.numberMax(elem_val, mod), .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty), - .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod), + .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod), } } return sema.addConstant(scalar_ty, accum); @@ -21165,6 +21203,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data; const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -21177,7 +21216,7 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air var mask = try sema.resolveInst(extra.mask); var mask_ty = sema.typeOf(mask); - const mask_len = switch (sema.typeOf(mask).zigTypeTag()) { + const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) { .Array, .Vector => sema.typeOf(mask).arrayLen(), else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}), }; @@ -21200,6 +21239,7 @@ fn analyzeShuffle( mask: Value, mask_len: u32, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node }; const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node }; const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node }; @@ -21211,7 +21251,7 @@ fn analyzeShuffle( .elem_type = elem_ty, }); - var maybe_a_len = switch (sema.typeOf(a).zigTypeTag()) { + var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) { .Array, .Vector => sema.typeOf(a).arrayLen(), .Undefined => null, else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{ @@ -21219,7 +21259,7 @@ fn analyzeShuffle( sema.typeOf(a).fmt(sema.mod), }), }; - var maybe_b_len = switch (sema.typeOf(b).zigTypeTag()) { + var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) { .Array, .Vector => sema.typeOf(b).arrayLen(), .Undefined => null, else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{ @@ -21255,7 +21295,7 @@ fn analyzeShuffle( var buf: Value.ElemValueBuffer = undefined; const elem = mask.elemValueBuffer(sema.mod, i, &buf); if (elem.isUndef()) continue; - const int = elem.toSignedInt(sema.mod.getTarget()); + const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; var chosen: u32 = undefined; if (int >= 0) { @@ -21297,7 +21337,7 @@ fn analyzeShuffle( values[i] = Value.undef; continue; } - const int = mask_elem_val.toSignedInt(sema.mod.getTarget()); + const int = mask_elem_val.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); if (int >= 0) { values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned); @@ -21356,6 +21396,7 @@ fn analyzeShuffle( } fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -21369,7 +21410,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const pred_uncoerced = try sema.resolveInst(extra.pred); const pred_ty = sema.typeOf(pred_uncoerced); - const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison()) { + const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) { .Vector, .Array => pred_ty.arrayLen(), else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}), }; @@ -21489,6 +21530,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data; const src = inst_data.src(); @@ -21505,7 +21547,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Enum => if (op != .Xchg) { return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{}); }, @@ -21536,7 +21578,6 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A break :rs operand_src; }; if (ptr_val.isComptimeMutablePtr()) { - const target = sema.mod.getTarget(); const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const new_val = switch (op) { @@ -21544,12 +21585,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Xchg => operand_val, .Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty), .Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty), - .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, sema.mod), - .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, sema.mod), - .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, sema.mod), - .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, sema.mod), - .Max => stored_val.numberMax (operand_val, target), - .Min => stored_val.numberMin (operand_val, target), + .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, mod), + .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, mod), + .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, mod), + .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, mod), + .Max => stored_val.numberMax (operand_val, mod), + .Min => stored_val.numberMin (operand_val, mod), // zig fmt: on }; try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty); @@ -21623,8 +21664,9 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1); const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2); const maybe_addend = try sema.resolveMaybeUndefVal(addend); + const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .Vector => {}, else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}), } @@ -21743,7 +21785,6 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const callee_ty = sema.typeOf(func); const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false); - const ensure_result_used = extra.flags.ensure_result_used; return sema.analyzeCall(block, func, func_ty, func_src, call_src, modifier, ensure_result_used, resolved_args, null, null); } @@ -21760,13 +21801,14 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known"); const field_ptr = try sema.resolveInst(extra.field_ptr); const field_ptr_ty = sema.typeOf(field_ptr); + const mod = sema.mod; - if (parent_ty.zigTypeTag() != .Struct and parent_ty.zigTypeTag() != .Union) { + if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) { return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)}); } try sema.resolveTypeLayout(parent_ty); - const field_index = switch (parent_ty.zigTypeTag()) { + const field_index = switch (parent_ty.zigTypeTag(mod)) { .Struct => blk: { if (parent_ty.isTuple()) { if (mem.eql(u8, field_name, "len")) { @@ -21781,7 +21823,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr else => unreachable, }; - if (parent_ty.zigTypeTag() == .Struct and parent_ty.structFieldIsComptime(field_index)) { + if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index)) { return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{}); } @@ -21913,15 +21955,14 @@ fn analyzeMinMax( ) CompileError!Air.Inst.Ref { assert(operands.len == operand_srcs.len); assert(operands.len > 0); + const mod = sema.mod; if (operands.len == 1) return operands[0]; - const mod = sema.mod; - const target = mod.getTarget(); const opFunc = switch (air_tag) { .min => Value.numberMin, .max => Value.numberMax, else => unreachable, }; // First, find all comptime-known arguments, and get their min/max @@ -21949,7 +21990,7 @@ fn analyzeMinMax( try sema.resolveLazyValue(operand_val); const vec_len = simd_op.len orelse { - const result_val = opFunc(cur_val, operand_val, target); + const result_val = opFunc(cur_val, operand_val, mod); cur_minmax = try sema.addConstant(simd_op.result_ty, result_val); continue; }; @@ -21959,7 +22000,7 @@ fn analyzeMinMax( for (elems, 0..)
|*elem, i| { const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf); - elem.* = opFunc(lhs_elem_val, rhs_elem_val, target); + elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod); } cur_minmax = try sema.addConstant( simd_op.result_ty, @@ -21984,7 +22025,7 @@ fn analyzeMinMax( break :refined orig_ty; } - const refined_ty = if (orig_ty.zigTypeTag() == .Vector) blk: { + const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: { const elem_ty = orig_ty.childType(); const len = orig_ty.vectorLen(); @@ -21996,16 +22037,16 @@ fn analyzeMinMax( for (1..len) |idx| { const elem_val = try val.elemValue(mod, sema.arena, idx); if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef - if (Value.order(elem_val, cur_min, target).compare(.lt)) cur_min = elem_val; - if (Value.order(elem_val, cur_max, target).compare(.gt)) cur_max = elem_val; + if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val; + if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val; } - const refined_elem_ty = try Type.intFittingRange(target, sema.arena, cur_min, cur_max); + const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max); break :blk try Type.vector(sema.arena, len, refined_elem_ty); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats if (val.isUndef()) break :blk orig_ty; // can't refine undef - break :blk try Type.intFittingRange(target, sema.arena, val, val); + break :blk try mod.intFittingRange(val, val); }; // Apply the refined type to the current value - this isn't strictly necessary in the @@ -22061,7 +22102,7 @@ fn analyzeMinMax( // Finally, refine the type based on the comptime-known bound. if (known_undef) break :refine; // can't refine undef const unrefined_ty = sema.typeOf(cur_minmax.?); - const is_vector = unrefined_ty.zigTypeTag() == .Vector; + const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector; const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty; const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty; @@ -22069,18 +22110,18 @@ fn analyzeMinMax( // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(sema.arena, target), - .max => try comptime_elem_ty.minInt(sema.arena, target), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(sema.arena, mod), + .max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, target), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, target), + .min => try comptime_elem_ty.maxInt(sema.arena, mod), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(sema.arena, mod), else => unreachable, }; // Find the smallest type which can contain these bounds - const final_elem_ty = try Type.intFittingRange(target, sema.arena, min_val, max_val); + const final_elem_ty = try mod.intFittingRange(min_val, max_val); const final_ty = if (is_vector) try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty) @@ -22132,6 +22173,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr); const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr); const target = sema.mod.getTarget(); + 
const mod = sema.mod; if (dest_ty.isConstPtr()) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); @@ -22196,7 +22238,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { - const len_u64 = (try len_val.?.getUnsignedIntAdvanced(target, sema)).?; + const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22239,12 +22281,12 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // lowering. The AIR instruction requires pointers with element types of // equal ABI size. - if (dest_ty.zigTypeTag() != .Pointer or src_ty.zigTypeTag() != .Pointer) { + if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{}); } - const dest_elem_ty = dest_ty.elemType2(); - const src_elem_ty = src_ty.elemType2(); + const dest_elem_ty = dest_ty.elemType2(mod); + const src_elem_ty = src_ty.elemType2(mod); if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{}); } @@ -22255,7 +22297,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { - const len = val.toUnsignedInt(target); + const len = val.toUnsignedInt(mod); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. return; @@ -22320,6 +22362,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -22334,14 +22377,13 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void return sema.fail(block, dest_src, "cannot memset constant pointer", .{}); } - const dest_elem_ty = dest_ptr_ty.elemType2(); - const target = sema.mod.getTarget(); + const dest_elem_ty = dest_ptr_ty.elemType2(mod); const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: { const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; - const len_u64 = (try len_val.getUnsignedIntAdvanced(target, sema)).?; + const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
@@ -22499,9 +22541,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node }; const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node }; @@ -22535,7 +22578,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.tag() == .generic_poison) { break :blk null; } - const alignment = @intCast(u32, val.toUnsignedInt(target)); + const alignment = @intCast(u32, val.toUnsignedInt(mod)); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk 0; @@ -22551,7 +22594,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - const alignment = @intCast(u32, align_tv.val.toUnsignedInt(target)); + const alignment = @intCast(u32, align_tv.val.toUnsignedInt(mod)); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk 0; @@ -22642,8 +22685,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += body.len; const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, "return type must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - const ty = try val.toType(&buffer).copy(sema.arena); + const ty = try val.toType().copy(sema.arena); break :blk ty; } else if (extra.data.bits.has_ret_ty_ref) blk: { const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); @@ -22654,8 +22696,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - var buffer: Value.ToTypeBuffer = undefined; - const ty = try ret_ty_tv.val.toType(&buffer).copy(sema.arena); + const ty = try ret_ty_tv.val.toType().copy(sema.arena); break :blk ty; } else Type.void; @@ -22727,13 +22768,14 @@ fn zirCDefine( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; const name = try sema.resolveConstString(block, name_src, extra.lhs, "name of macro being defined must be comptime-known"); const rhs = try sema.resolveInst(extra.rhs); - if (sema.typeOf(rhs).zigTypeTag() != .Void) { + if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) { const value = try sema.resolveConstString(block, val_src, extra.rhs, "value of macro being defined must be comptime-known"); try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value }); } else { @@ -22799,9 +22841,9 @@ fn resolvePrefetchOptions( block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.PrefetchOptions { + const mod = sema.mod; const options_ty = try sema.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); - const target = sema.mod.getTarget(); const rw_src = sema.maybeOptionsSrc(block, src, "rw"); const
locality_src = sema.maybeOptionsSrc(block, src, "locality"); @@ -22818,7 +22860,7 @@ fn resolvePrefetchOptions( return std.builtin.PrefetchOptions{ .rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw), - .locality = @intCast(u2, locality_val.toUnsignedInt(target)), + .locality = @intCast(u2, locality_val.toUnsignedInt(mod)), .cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache), }; } @@ -22887,7 +22929,7 @@ fn resolveExternOptions( const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); - const library_name = if (!library_name_val.isNull()) blk: { + const library_name = if (!library_name_val.isNull(mod)) blk: { const payload = library_name_val.castTag(.opt_payload).?.data; const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); if (library_name.len == 0) { @@ -22917,17 +22959,17 @@ fn zirBuiltinExtern( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; var ty = try sema.resolveType(block, ty_src, extra.lhs); - if (!ty.isPtrAtRuntime()) { + if (!ty.isPtrAtRuntime(mod)) { return sema.fail(block, ty_src, "expected (optional) pointer", .{}); } if (!try sema.validateExternType(ty.childType(), .other)) { const msg = msg: { - const mod = sema.mod; const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); @@ -22945,7 +22987,7 @@ fn zirBuiltinExtern( else => |e| return e, }; - if (options.linkage == .Weak and !ty.ptrAllowsZero()) { + if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) { ty = try Type.optional(sema.arena, ty); } @@ -23087,7 +23129,7 @@ fn validateVarType( const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), var_ty); - if (var_ty.zigTypeTag() == .ComptimeInt or var_ty.zigTypeTag() == .ComptimeFloat) { + if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) { try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}); } @@ -23101,8 +23143,9 @@ fn validateRunTimeType( var_ty: Type, is_extern: bool, ) CompileError!bool { + const mod = sema.mod; var ty = var_ty; - while (true) switch (ty.zigTypeTag()) { + while (true) switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, @@ -23126,9 +23169,9 @@ fn validateRunTimeType( .Pointer => { const elem_ty = ty.childType(); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Opaque => return true, - .Fn => return elem_ty.isFnOrHasRuntimeBits(), + .Fn => return elem_ty.isFnOrHasRuntimeBits(mod), else => ty = elem_ty, } }, @@ -23174,7 +23217,7 @@ fn explainWhyTypeIsComptimeInner( type_set: *TypeSet, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, @@ -23211,8 +23254,8 @@ fn explainWhyTypeIsComptimeInner( try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set); }, .Pointer => { - const elem_ty = ty.elemType2(); - if 
(elem_ty.zigTypeTag() == .Fn) { + const elem_ty = ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Fn) { const fn_info = elem_ty.fnInfo(); if (fn_info.is_generic) { try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{}); @@ -23221,7 +23264,7 @@ fn explainWhyTypeIsComptimeInner( .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}), else => {}, } - if (fn_info.return_type.comptimeOnly()) { + if (fn_info.return_type.comptimeOnly(mod)) { try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{}); } return; @@ -23295,7 +23338,8 @@ fn validateExternType( ty: Type, position: ExternPosition, ) !bool { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, @@ -23314,7 +23358,7 @@ fn validateExternType( .AnyFrame, => return true, .Pointer => return !(ty.isSlice() or try sema.typeRequiresComptime(ty)), - .Int => switch (ty.intInfo(sema.mod.getTarget()).bits) { + .Int => switch (ty.intInfo(mod).bits) { 8, 16, 32, 64, 128 => return true, else => return false, }, @@ -23329,14 +23373,12 @@ fn validateExternType( return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention()); }, .Enum => { - var buf: Type.Payload.Bits = undefined; - return sema.validateExternType(ty.intTagType(&buf), position); + return sema.validateExternType(ty.intTagType(), position); }, .Struct, .Union => switch (ty.containerLayout()) { .Extern => return true, .Packed => { - const target = sema.mod.getTarget(); - const bit_size = try ty.bitSizeAdvanced(target, sema); + const bit_size = try ty.bitSizeAdvanced(mod, sema); switch (bit_size) { 8, 16, 32, 64, 128 => return true, else => return false, @@ -23346,10 +23388,10 @@ fn validateExternType( }, .Array => { if (position == .ret_ty or position == .param_ty) return false; - return sema.validateExternType(ty.elemType2(), .element); + return sema.validateExternType(ty.elemType2(mod), .element); }, - .Vector => return sema.validateExternType(ty.elemType2(), .element), - .Optional => return ty.isPtrLikeOptional(), + .Vector => return sema.validateExternType(ty.elemType2(mod), .element), + .Optional => return ty.isPtrLikeOptional(mod), } } @@ -23361,7 +23403,7 @@ fn explainWhyTypeIsNotExtern( position: ExternPosition, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Opaque, .Bool, .Float, @@ -23390,7 +23432,7 @@ fn explainWhyTypeIsNotExtern( }, .Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}), .NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}), - .Int => if (!std.math.isPowerOfTwo(ty.intInfo(sema.mod.getTarget()).bits)) { + .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) { try mod.errNoteNonLazy(src_loc, msg, "only integers with power of two bits are extern compatible", .{}); } else { try mod.errNoteNonLazy(src_loc, msg, "only integers with 8, 16, 32, 64 and 128 bits are extern compatible", .{}); @@ -23409,8 +23451,7 @@ fn explainWhyTypeIsNotExtern( } }, .Enum => { - var buf: Type.Payload.Bits = undefined; - const tag_ty = ty.intTagType(&buf); + const tag_ty = ty.intTagType(); try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, @@ -23422,17 +23463,17 @@ fn explainWhyTypeIsNotExtern( } else if (position == .param_ty) { 
return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{}); } - try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element); + try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element); }, - .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element), + .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element), .Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}), } } /// Returns true if `ty` is allowed in packed types. /// Does *NOT* require `ty` to be resolved in any way. -fn validatePackedType(ty: Type) bool { - switch (ty.zigTypeTag()) { +fn validatePackedType(ty: Type, mod: *const Module) bool { + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, @@ -23448,7 +23489,7 @@ fn validatePackedType(ty: Type) bool { .Fn, .Array, => return false, - .Optional => return ty.isPtrLikeOptional(), + .Optional => return ty.isPtrLikeOptional(mod), .Void, .Bool, .Float, @@ -23468,7 +23509,7 @@ fn explainWhyTypeIsNotPacked( ty: Type, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void, .Bool, .Float, @@ -23731,6 +23772,7 @@ fn panicSentinelMismatch( sentinel_index: Air.Inst.Ref, ) !void { assert(!parent_block.is_comptime); + const mod = sema.mod; const expected_sentinel_val = maybe_sentinel orelse return; const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val); @@ -23743,7 +23785,7 @@ fn panicSentinelMismatch( break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; - const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: { + const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: { const eql = try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq); break :ok try parent_block.addInst(.{ @@ -23753,7 +23795,7 @@ fn panicSentinelMismatch( .operation = .And, } }, }); - } else if (sentinel_ty.isSelfComparable(true)) + } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { const panic_fn = try sema.getBuiltin("checkNonScalarSentinel"); @@ -23848,6 +23890,7 @@ fn fieldVal( // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. This function takes a value and returns a value. 
+ const mod = sema.mod; const arena = sema.arena; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -23862,7 +23905,7 @@ fn fieldVal( else object_ty; - switch (inner_ty.zigTypeTag()) { + switch (inner_ty.zigTypeTag(mod)) { .Array => { if (mem.eql(u8, field_name, "len")) { return sema.addConstant( @@ -23926,10 +23969,9 @@ fn fieldVal( object; const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; - var to_type_buffer: Value.ToTypeBuffer = undefined; - const child_type = val.toType(&to_type_buffer); + const child_type = val.toType(); - switch (try child_type.zigTypeTagOrPoison()) { + switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { if (payload.data.names.getEntry(field_name)) |entry| { @@ -23997,7 +24039,7 @@ fn fieldVal( const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); if (child_type.isSlice()) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); - if (child_type.zigTypeTag() == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); + if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -24035,9 +24077,10 @@ fn fieldPtr( // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. + const mod = sema.mod; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); - const object_ty = switch (object_ptr_ty.zigTypeTag()) { + const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { .Pointer => object_ptr_ty.elemType(), else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}), }; @@ -24052,7 +24095,7 @@ fn fieldPtr( else object_ty; - switch (inner_ty.zigTypeTag()) { + switch (inner_ty.zigTypeTag(mod)) { .Array => { if (mem.eql(u8, field_name, "len")) { var anon_decl = try block.startAnonDecl(); @@ -24142,10 +24185,9 @@ fn fieldPtr( result; const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; - var to_type_buffer: Value.ToTypeBuffer = undefined; - const child_type = val.toType(&to_type_buffer); + const child_type = val.toType(); - switch (child_type.zigTypeTag()) { + switch (child_type.zigTypeTag(mod)) { .ErrorSet => { // TODO resolve inferred error sets const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { @@ -24258,15 +24300,16 @@ fn fieldCallBind( // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. + const mod = sema.mod; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); - const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C)) + const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C)) raw_ptr_ty.childType() else return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)}); // Optionally dereference a second pointer to get the concrete type. 
- const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One; + const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize() == .One; const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty; const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; const object_ptr = if (is_double_ptr) @@ -24275,7 +24318,7 @@ fn fieldCallBind( raw_ptr; find_field: { - switch (concrete_ty.zigTypeTag()) { + switch (concrete_ty.zigTypeTag(mod)) { .Struct => { const struct_ty = try sema.resolveTypeFields(concrete_ty); if (struct_ty.castTag(.@"struct")) |struct_obj| { @@ -24321,21 +24364,21 @@ fn fieldCallBind( } // If we get here, we need to look for a decl in the struct type instead. - const found_decl = switch (concrete_ty.zigTypeTag()) { + const found_decl = switch (concrete_ty.zigTypeTag(mod)) { .Struct, .Opaque, .Union, .Enum => found_decl: { if (concrete_ty.getNamespace()) |namespace| { if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| { try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); - if (decl_type.zigTypeTag() == .Fn and + if (decl_type.zigTypeTag(mod) == .Fn and decl_type.fnParamLen() >= 1) { const first_param_type = decl_type.fnParamType(0); const first_param_tag = first_param_type.tag(); // zig fmt: off if (first_param_tag == .generic_poison or ( - first_param_type.zigTypeTag() == .Pointer and + first_param_type.zigTypeTag(mod) == .Pointer and (first_param_type.ptrSize() == .One or first_param_type.ptrSize() == .C) and first_param_type.childType().eql(concrete_ty, sema.mod))) @@ -24356,7 +24399,7 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (first_param_type.zigTypeTag() == .Optional) { + } else if (first_param_type.zigTypeTag(mod) == .Optional) { var opt_buf: Type.Payload.ElemType = undefined; const child = first_param_type.optionalChild(&opt_buf); if (child.eql(concrete_ty, sema.mod)) { @@ -24365,7 +24408,7 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (child.zigTypeTag() == .Pointer and + } else if (child.zigTypeTag(mod) == .Pointer and child.ptrSize() == .One and child.childType().eql(concrete_ty, sema.mod)) { @@ -24374,7 +24417,7 @@ fn fieldCallBind( .arg0_inst = object_ptr, } }; } - } else if (first_param_type.zigTypeTag() == .ErrorUnion and + } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and first_param_type.errorUnionPayload().eql(concrete_ty, sema.mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); @@ -24421,9 +24464,10 @@ fn finishFieldCallBind( .@"addrspace" = ptr_ty.ptrAddressSpace(), }); + const mod = sema.mod; const container_ty = ptr_ty.childType(); - if (container_ty.zigTypeTag() == .Struct) { - if (container_ty.structFieldValueComptime(field_index)) |default_val| { + if (container_ty.zigTypeTag(mod) == .Struct) { + if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; } } @@ -24504,7 +24548,8 @@ fn structFieldPtr( unresolved_struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const mod = sema.mod; + assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); try sema.resolveStructLayout(struct_ty); @@ -24544,6 +24589,7 @@ fn structFieldPtrByIndex( return 
sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing); } + const mod = sema.mod; const struct_obj = struct_ty.castTag(.@"struct").?.data; const field = struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); @@ -24568,7 +24614,7 @@ fn structFieldPtrByIndex( if (i == field_index) { ptr_ty_data.bit_offset = running_bits; } - running_bits += @intCast(u16, f.ty.bitSize(target)); + running_bits += @intCast(u16, f.ty.bitSize(mod)); } ptr_ty_data.host_size = (running_bits + 7) / 8; @@ -24582,7 +24628,7 @@ fn structFieldPtrByIndex( const parent_align = if (struct_ptr_ty_info.@"align" != 0) struct_ptr_ty_info.@"align" else - struct_ptr_ty_info.pointee_type.abiAlignment(target); + struct_ptr_ty_info.pointee_type.abiAlignment(mod); ptr_ty_data.@"align" = parent_align; // If the field happens to be byte-aligned, simplify the pointer type. @@ -24596,8 +24642,8 @@ fn structFieldPtrByIndex( if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0 and target.cpu.arch.endian() == .Little) { - const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(target); - const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target); + const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(mod); + const elem_size_bits = ptr_ty_data.pointee_type.bitSize(mod); if (elem_size_bytes * 8 == elem_size_bits) { const byte_offset = ptr_ty_data.bit_offset / 8; const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align)); @@ -24644,7 +24690,8 @@ fn structFieldVal( field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { - assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const mod = sema.mod; + assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); switch (struct_ty.tag()) { @@ -24728,9 +24775,10 @@ fn tupleFieldValByIndex( field_index: u32, tuple_ty: Type, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(field_index)) |default_value| { + if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); } @@ -24743,7 +24791,7 @@ fn tupleFieldValByIndex( return sema.addConstant(field_ty, field_values[field_index]); } - if (tuple_ty.structFieldValueComptime(field_index)) |default_val| { + if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(field_ty, default_val); } @@ -24762,7 +24810,9 @@ fn unionFieldPtr( initializing: bool, ) CompileError!Air.Inst.Ref { const arena = sema.arena; - assert(unresolved_union_ty.zigTypeTag() == .Union); + const mod = sema.mod; + + assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ptr_ty = sema.typeOf(union_ptr); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); @@ -24777,7 +24827,7 @@ fn unionFieldPtr( }); const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); - if (initializing and field.ty.zigTypeTag() == .NoReturn) { + if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); @@ -24839,7 +24889,7 @@ fn unionFieldPtr( const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); } - if (field.ty.zigTypeTag() == .NoReturn) 
{ + if (field.ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -24855,7 +24905,8 @@ fn unionFieldVal( field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { - assert(unresolved_union_ty.zigTypeTag() == .Union); + const mod = sema.mod; + assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); const union_obj = union_ty.cast(Type.Payload.Union).?.data; @@ -24911,7 +24962,7 @@ fn unionFieldVal( const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); } - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -24928,22 +24979,22 @@ fn elemPtr( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const indexable_ptr_src = src; // TODO better source location const indexable_ptr_ty = sema.typeOf(indexable_ptr); - const target = sema.mod.getTarget(); - const indexable_ty = switch (indexable_ptr_ty.zigTypeTag()) { + const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { .Pointer => indexable_ptr_ty.elemType(), else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), }; try checkIndexable(sema, block, src, indexable_ty); - switch (indexable_ty.zigTypeTag()) { + switch (indexable_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety), .Struct => { // Tuple field access. const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(target)); + const index = @intCast(u32, index_val.toUnsignedInt(mod)); return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -24966,7 +25017,7 @@ fn elemPtrOneLayerOnly( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const target = sema.mod.getTarget(); + const mod = sema.mod; try checkIndexable(sema, block, src, indexable_ty); @@ -24978,7 +25029,7 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); const result_ty = try sema.elemPtrType(indexable_ty, index); return sema.addConstant(result_ty, elem_ptr); @@ -24989,7 +25040,7 @@ fn elemPtrOneLayerOnly( return block.addPtrElemPtr(indexable, elem_index, result_ty); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable + assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety); }, } @@ -25006,7 +25057,7 @@ fn elemVal( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const target = sema.mod.getTarget(); + const 
mod = sema.mod; try checkIndexable(sema, block, src, indexable_ty); // TODO in case of a vector of pointers, we need to detect whether the // index is a scalar or vector instead of unconditionally casting to usize. const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src); - switch (indexable_ty.zigTypeTag()) { + switch (indexable_ty.zigTypeTag(mod)) { .Pointer => switch (indexable_ty.ptrSize()) { .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable); const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { - return sema.addConstant(indexable_ty.elemType2(), elem_val); + return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } break :rs indexable_src; }; @@ -25036,7 +25087,7 @@ fn elemVal( return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable + assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety); return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src); }, @@ -25049,7 +25100,7 @@ fn elemVal( .Struct => { // Tuple field access. const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(target)); + const index = @intCast(u32, index_val.toUnsignedInt(mod)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, } } @@ -25093,6 +25144,7 @@ fn tupleFieldPtr( field_index: u32, init: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(); _ = try sema.resolveTypeFields(tuple_ty); @@ -25116,7 +25168,7 @@ fn tupleFieldPtr( .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(), }); - if (tuple_ty.structFieldValueComptime(field_index)) |default_val| { + if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ .field_ty = field_ty, .field_val = default_val, @@ -25151,6 +25203,7 @@ fn tupleField( field_index_src: LazySrcLoc, field_index: u32, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple)); const field_count = tuple_ty.structFieldCount(); @@ -25166,13 +25219,13 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(field_index)) |default_value| { + if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); // comptime field } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, field_index)); + return sema.addConstant(field_ty,
tuple_val.fieldValue(tuple_ty, mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25191,6 +25244,7 @@ fn elemValArray( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const array_ty = sema.typeOf(array); const array_sent = array_ty.sentinel(); const array_len = array_ty.arrayLen(); @@ -25204,10 +25258,9 @@ fn elemValArray( const maybe_undef_array_val = try sema.resolveMaybeUndefVal(array); // index must be defined since it can access out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); - const target = sema.mod.getTarget(); if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); if (array_sent) |s| { if (index == array_len) { return sema.addConstant(elem_ty, s); @@ -25223,7 +25276,7 @@ fn elemValArray( return sema.addConstUndef(elem_ty); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); const elem_val = try array_val.elemValue(sema.mod, sema.arena, index); return sema.addConstant(elem_ty, elem_val); } @@ -25255,7 +25308,7 @@ fn elemPtrArray( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); + const mod = sema.mod; const array_ptr_ty = sema.typeOf(array_ptr); const array_ty = array_ptr_ty.childType(); const array_sent = array_ty.sentinel() != null; @@ -25269,7 +25322,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveMaybeUndefVal(array_ptr); // The index must not be undefined since it can be out of bounds. 
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target)); + const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod)); if (index >= array_len_s) { const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label }); @@ -25290,7 +25343,7 @@ fn elemPtrArray( } if (!init) { - try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(), array_ty, array_ptr_src); + try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src); } const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src; @@ -25316,16 +25369,16 @@ fn elemValSlice( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const slice_ty = sema.typeOf(slice); const slice_sent = slice_ty.sentinel() != null; - const elem_ty = slice_ty.elemType2(); + const elem_ty = slice_ty.elemType2(mod); var runtime_src = slice_src; // slice must be defined since it can dereferenced as null const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice); // index must be defined since it can index out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); - const target = sema.mod.getTarget(); if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; @@ -25335,7 +25388,7 @@ fn elemValSlice( return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); if (index >= slice_len_s) { const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); @@ -25373,14 +25426,14 @@ fn elemPtrSlice( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); + const mod = sema.mod; const slice_ty = sema.typeOf(slice); const slice_sent = slice_ty.sentinel() != null; const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice); // The index must not be undefined since it can be out of bounds. const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: { - const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target)); + const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod)); break :o index; } else null; @@ -25484,6 +25537,7 @@ fn coerceExtra( const dest_ty_src = inst_src; // TODO better source location const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst)); + const mod = sema.mod; const target = sema.mod.getTarget(); // If the types are the same, we can return the operand. 
if (dest_ty.eql(inst_ty, sema.mod)) @@ -25502,9 +25556,9 @@ fn coerceExtra( return block.addBitCast(dest_ty, inst); } - const is_undef = inst_ty.zigTypeTag() == .Undefined; + const is_undef = inst_ty.zigTypeTag(mod) == .Undefined; - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .Optional => optional: { // undefined sets the optional bit also to undefined. if (is_undef) { @@ -25512,18 +25566,18 @@ fn coerceExtra( } // null to ?T - if (inst_ty.zigTypeTag() == .Null) { + if (inst_ty.zigTypeTag(mod) == .Null) { return sema.addConstant(dest_ty, Value.null); } // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer - if (dest_ty.isPtrLikeOptional() and dest_ty.elemType2().tag() == .anyopaque and - inst_ty.isPtrAtRuntime()) + if (dest_ty.isPtrLikeOptional(mod) and dest_ty.elemType2(mod).tag() == .anyopaque and + inst_ty.isPtrAtRuntime(mod)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; - const elem_ty = inst_ty.elemType2(); - if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) { + const elem_ty = inst_ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25532,7 +25586,7 @@ fn coerceExtra( } // Let the logic below handle wrapping the optional now that // it has been checked to correctly coerce. - if (!inst_ty.isPtrLikeOptional()) break :anyopaque_check; + if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check; return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } @@ -25554,7 +25608,7 @@ fn coerceExtra( const dest_info = dest_ty.ptrInfo().data; // Function body to function pointer. 
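// A user-level sketch of that case, assuming ordinary Zig semantics (not code
// from this patch): a function body of type `fn (i32) i32` coerces to the
// function pointer `*const fn (i32) i32`, which Sema models by taking a
// reference to the function's declaration.
const std = @import("std");
fn addOne(x: i32) i32 {
    return x + 1;
}
test "function body to function pointer sketch" {
    const p: *const fn (i32) i32 = addOne; // body -> pointer
    try std.testing.expectEqual(@as(i32, 2), p(1));
}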
- if (inst_ty.zigTypeTag() == .Fn) { + if (inst_ty.zigTypeTag(mod) == .Fn) { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); const fn_decl = fn_val.pointerDecl().?; const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); @@ -25568,7 +25622,7 @@ fn coerceExtra( if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const ptr_elem_ty = inst_ty.childType(); const array_ty = dest_info.pointee_type; - if (array_ty.zigTypeTag() != .Array) break :single_item; + if (array_ty.zigTypeTag(mod) != .Array) break :single_item; const array_elem_ty = array_ty.childType(); if (array_ty.arrayLen() != 1) break :single_item; const dest_is_mut = dest_info.mutable; @@ -25584,7 +25638,7 @@ fn coerceExtra( if (!inst_ty.isSinglePointer()) break :src_array_ptr; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const array_ty = inst_ty.childType(); - if (array_ty.zigTypeTag() != .Array) break :src_array_ptr; + if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr; const array_elem_type = array_ty.childType(); const dest_is_mut = dest_info.mutable; @@ -25656,10 +25710,10 @@ fn coerceExtra( // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer - if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag() == .Pointer) to_anyopaque: { + if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; - const elem_ty = inst_ty.elemType2(); - if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) { + const elem_ty = inst_ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25679,7 +25733,7 @@ fn coerceExtra( switch (dest_info.size) { // coercion to C pointer - .C => switch (inst_ty.zigTypeTag()) { + .C => switch (inst_ty.zigTypeTag(mod)) { .Null => { return sema.addConstant(dest_ty, Value.null); }, @@ -25691,7 +25745,7 @@ fn coerceExtra( return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, .Int => { - const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) { + const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) { .signed => Type.isize, .unsigned => Type.usize, }; @@ -25733,7 +25787,7 @@ fn coerceExtra( }, else => {}, }, - .One => switch (dest_info.pointee_type.zigTypeTag()) { + .One => switch (dest_info.pointee_type.zigTypeTag(mod)) { .Union => { // pointer to anonymous struct to pointer to union if (inst_ty.isSinglePointer() and @@ -25767,7 +25821,7 @@ fn coerceExtra( else => {}, }, .Slice => to_slice: { - if (inst_ty.zigTypeTag() == .Array) { + if (inst_ty.zigTypeTag(mod) == .Array) { return sema.fail( block, inst_src, @@ -25789,7 +25843,7 @@ fn coerceExtra( .ptr = if (dest_info.@"align" != 0) try Value.Tag.int_u64.create(sema.arena, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(target, sema.arena), + try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), .len = Value.zero, }); return sema.addConstant(dest_ty, slice_val); @@ -25834,13 +25888,13 @@ fn coerceExtra( }, } }, - .Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) { + .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) { .Float, .ComptimeFloat => float: { if (is_undef) { return sema.addConstUndef(dest_ty); } const val = (try sema.resolveMaybeUndefVal(inst)) orelse { - if 
(dest_ty.zigTypeTag() == .ComptimeInt) { + if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known"); } @@ -25870,15 +25924,15 @@ fn coerceExtra( } return try sema.addConstant(dest_ty, val); } - if (dest_ty.zigTypeTag() == .ComptimeInt) { + if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; if (opts.no_cast_to_comptime_int) return inst; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known"); } // integer widening - const dst_info = dest_ty.intInfo(target); - const src_info = inst_ty.intInfo(target); + const dst_info = dest_ty.intInfo(mod); + const src_info = inst_ty.intInfo(mod); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (dst_info.signedness == .signed and dst_info.bits > src_info.bits)) @@ -25892,7 +25946,7 @@ fn coerceExtra( }, else => {}, }, - .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag()) { + .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) { .ComptimeFloat => { const val = try sema.resolveConstValue(block, .unneeded, inst, ""); const result_val = try val.floatCast(sema.arena, dest_ty, target); @@ -25913,7 +25967,7 @@ fn coerceExtra( ); } return try sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeFloat) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -25931,7 +25985,7 @@ fn coerceExtra( return sema.addConstUndef(dest_ty); } const val = (try sema.resolveMaybeUndefVal(inst)) orelse { - if (dest_ty.zigTypeTag() == .ComptimeFloat) { + if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -25955,7 +26009,7 @@ fn coerceExtra( }, else => {}, }, - .Enum => switch (inst_ty.zigTypeTag()) { + .Enum => switch (inst_ty.zigTypeTag(mod)) { .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); @@ -25991,7 +26045,7 @@ fn coerceExtra( }, else => {}, }, - .ErrorUnion => switch (inst_ty.zigTypeTag()) { + .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) { .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { switch (inst_val.tag()) { @@ -26031,7 +26085,7 @@ fn coerceExtra( }; }, }, - .Union => switch (inst_ty.zigTypeTag()) { + .Union => switch (inst_ty.zigTypeTag(mod)) { .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst_ty.isAnonStruct()) { @@ -26043,7 +26097,7 @@ fn coerceExtra( }, else => {}, }, - .Array => switch (inst_ty.zigTypeTag()) { + .Array => switch (inst_ty.zigTypeTag(mod)) { .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst == .empty_struct) { @@ -26058,7 +26112,7 @@ fn coerceExtra( }, else => {}, }, - .Vector => switch (inst_ty.zigTypeTag()) { + .Vector => switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst_ty.isTuple()) { @@ -26093,7 +26147,7 @@ fn 
coerceExtra( if (!opts.report_err) return error.NotCoercible; - if (opts.is_ret and dest_ty.zigTypeTag() == .NoReturn) { + if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{}); errdefer msg.destroy(sema.gpa); @@ -26111,7 +26165,7 @@ fn coerceExtra( errdefer msg.destroy(sema.gpa); // E!T to T - if (inst_ty.zigTypeTag() == .ErrorUnion and + if (inst_ty.zigTypeTag(mod) == .ErrorUnion and (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{}); @@ -26120,7 +26174,7 @@ fn coerceExtra( // ?T to T var buf: Type.Payload.ElemType = undefined; - if (inst_ty.zigTypeTag() == .Optional and + if (inst_ty.zigTypeTag(mod) == .Optional and (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{}); @@ -26133,7 +26187,7 @@ fn coerceExtra( if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) { const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - if (inst_ty.isError() and !dest_ty.isError()) { + if (inst_ty.isError(mod) and !dest_ty.isError(mod)) { try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function cannot return an error", .{}); } else { try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function return type declared here", .{}); @@ -26264,6 +26318,7 @@ const InMemoryCoercionResult = union(enum) { } fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void { + const mod = sema.mod; var cur = res; while (true) switch (cur.*) { .ok => unreachable, @@ -26445,8 +26500,8 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_allowzero => |pair| { - const wanted_allow_zero = pair.wanted.ptrAllowsZero(); - const actual_allow_zero = pair.actual.ptrAllowsZero(); + const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod); + const actual_allow_zero = pair.actual.ptrAllowsZero(mod); if (actual_allow_zero and !wanted_allow_zero) { try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{ pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), @@ -26522,13 +26577,15 @@ fn coerceInMemoryAllowed( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) CompileError!InMemoryCoercionResult { - if (dest_ty.eql(src_ty, sema.mod)) + const mod = sema.mod; + + if (dest_ty.eql(src_ty, mod)) return .ok; // Differently-named integers with the same number of bits. - if (dest_ty.zigTypeTag() == .Int and src_ty.zigTypeTag() == .Int) { - const dest_info = dest_ty.intInfo(target); - const src_info = src_ty.intInfo(target); + if (dest_ty.zigTypeTag(mod) == .Int and src_ty.zigTypeTag(mod) == .Int) { + const dest_info = dest_ty.intInfo(mod); + const src_info = src_ty.intInfo(mod); if (dest_info.signedness == src_info.signedness and dest_info.bits == src_info.bits) @@ -26551,7 +26608,7 @@ fn coerceInMemoryAllowed( } // Differently-named floats with the same number of bits. 
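// The integer rules above in standalone form (a hypothetical helper, not the
// compiler's own; the float rule that follows is analogous but keyed on bit
// count alone): same-signedness widening is allowed, and an unsigned source
// may coerce into a strictly wider signed destination, while in-memory
// compatibility requires identical signedness and bit count.
const std = @import("std");
fn intWideningOk(dst: std.builtin.Type.Int, src: std.builtin.Type.Int) bool {
    return (src.signedness == dst.signedness and dst.bits >= src.bits) or
        (dst.signedness == .signed and dst.bits > src.bits);
}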
- if (dest_ty.zigTypeTag() == .Float and src_ty.zigTypeTag() == .Float) { + if (dest_ty.zigTypeTag(mod) == .Float and src_ty.zigTypeTag(mod) == .Float) { const dest_bits = dest_ty.floatBits(target); const src_bits = src_ty.floatBits(target); if (dest_bits == src_bits) { @@ -26575,8 +26632,8 @@ fn coerceInMemoryAllowed( return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } - const dest_tag = dest_ty.zigTypeTag(); - const src_tag = src_ty.zigTypeTag(); + const dest_tag = dest_ty.zigTypeTag(mod); + const src_tag = src_ty.zigTypeTag(mod); // Functions if (dest_tag == .Fn and src_tag == .Fn) { @@ -26624,7 +26681,7 @@ fn coerceInMemoryAllowed( } const ok_sent = dest_info.sentinel == null or (src_info.sentinel != null and - dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, sema.mod)); + dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, mod)); if (!ok_sent) { return InMemoryCoercionResult{ .array_sentinel = .{ .actual = src_info.sentinel orelse Value.initTag(.unreachable_value), @@ -26646,8 +26703,8 @@ fn coerceInMemoryAllowed( } }; } - const dest_elem_ty = dest_ty.scalarType(); - const src_elem_ty = src_ty.scalarType(); + const dest_elem_ty = dest_ty.scalarType(mod); + const src_elem_ty = src_ty.scalarType(mod); const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src); if (child != .ok) { return InMemoryCoercionResult{ .vector_elem = .{ @@ -26923,6 +26980,7 @@ fn coerceInMemoryAllowedPtrs( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { + const mod = sema.mod; const dest_info = dest_ptr_ty.ptrInfo().data; const src_info = src_ptr_ty.ptrInfo().data; @@ -26964,8 +27022,8 @@ fn coerceInMemoryAllowedPtrs( } }; } - const dest_allow_zero = dest_ty.ptrAllowsZero(); - const src_allow_zero = src_ty.ptrAllowsZero(); + const dest_allow_zero = dest_ty.ptrAllowsZero(mod); + const src_allow_zero = src_ty.ptrAllowsZero(mod); const ok_allows_zero = (dest_allow_zero and (src_allow_zero or !dest_is_mut)) or @@ -27013,12 +27071,12 @@ fn coerceInMemoryAllowedPtrs( const src_align = if (src_info.@"align" != 0) src_info.@"align" else - src_info.pointee_type.abiAlignment(target); + src_info.pointee_type.abiAlignment(mod); const dest_align = if (dest_info.@"align" != 0) dest_info.@"align" else - dest_info.pointee_type.abiAlignment(target); + dest_info.pointee_type.abiAlignment(mod); if (dest_align > src_align) { return InMemoryCoercionResult{ .ptr_alignment = .{ @@ -27041,8 +27099,9 @@ fn coerceVarArgParam( ) !Air.Inst.Ref { if (block.is_typeof) return inst; + const mod = sema.mod; const uncasted_ty = sema.typeOf(inst); - const coerced = switch (uncasted_ty.zigTypeTag()) { + const coerced = switch (uncasted_ty.zigTypeTag(mod)) { // TODO consider casting to c_int/f64 if they fit .ComptimeInt, .ComptimeFloat => return sema.fail( block, @@ -27124,7 +27183,8 @@ fn storePtr2( // this code does not handle tuple-to-struct coercion which requires dealing with missing // fields. const operand_ty = sema.typeOf(uncasted_operand); - if (operand_ty.isTuple() and elem_ty.zigTypeTag() == .Array) { + const mod = sema.mod; + if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) { const field_count = operand_ty.structFieldCount(); var i: u32 = 0; while (i < field_count) : (i += 1) { @@ -27225,7 +27285,8 @@ fn storePtr2( /// lengths match. 
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const array_ty = sema.typeOf(ptr).childType(); - if (array_ty.zigTypeTag() != .Array) return null; + const mod = sema.mod; + if (array_ty.zigTypeTag(mod) != .Array) return null; var ptr_inst = Air.refToIndex(ptr) orelse return null; const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); @@ -27237,7 +27298,7 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type, else => return null, }; - if (prev_ptr_child_ty.zigTypeTag() == .Vector) break prev_ptr; + if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr; ptr_inst = Air.refToIndex(prev_ptr) orelse return null; } else return null; @@ -27263,6 +27324,7 @@ fn storePtrVal( operand_val: Value, operand_ty: Type, ) !void { + const mod = sema.mod; var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty); try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut); @@ -27281,8 +27343,7 @@ fn storePtrVal( val_ptr.* = try operand_val.copy(arena); }, .reinterpret => |reinterpret| { - const target = sema.mod.getTarget(); - const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { @@ -27354,7 +27415,7 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); + const mod = sema.mod; switch (ptr_val.tag()) { .decl_ref_mut => { const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; @@ -27375,7 +27436,7 @@ fn beginComptimePtrMutation( var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); switch (parent.pointee) { - .direct => |val_ptr| switch (parent.ty.zigTypeTag()) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { .Array, .Vector => { const check_len = parent.ty.arrayLenIncludingSentinel(); if (elem_ptr.index >= check_len) { @@ -27570,7 +27631,7 @@ fn beginComptimePtrMutation( }, }, .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout()) { + if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { // Even though the parent value type has well-defined memory layout, our // pointer type does not. 
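// Background on that distinction, assuming ordinary Zig semantics (not code
// from this patch): packed and extern containers have a well-defined memory
// layout, so their bytes may be reinterpreted; ordinary structs do not, which
// is why the reinterpret path must bail out on them.
const std = @import("std");
test "well-defined layout sketch" {
    const P = packed struct { lo: u8, hi: u8 }; // lo occupies the low bits
    const v = P{ .lo = 0x34, .hi = 0x12 };
    try std.testing.expectEqual(@as(u16, 0x1234), @bitCast(u16, v));
}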
return ComptimePtrMutationKit{ @@ -27608,7 +27669,7 @@ fn beginComptimePtrMutation( const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); - switch (parent.ty.zigTypeTag()) { + switch (parent.ty.zigTypeTag(mod)) { .Struct => { const fields = try arena.alloc(Value, parent.ty.structFieldCount()); @memset(fields, Value.undef); @@ -27746,7 +27807,7 @@ fn beginComptimePtrMutation( else => unreachable, }, .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, target); + const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); const field_offset = try sema.usizeCast(block, src, field_offset_u64); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, @@ -27872,7 +27933,8 @@ fn beginComptimePtrMutationInner( ptr_elem_ty: Type, decl_ref_mut: Value.Payload.DeclRefMut.Data, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; if (coerce_ok) { return ComptimePtrMutationKit{ @@ -27883,7 +27945,7 @@ fn beginComptimePtrMutationInner( } // Handle the case that the decl is an array and we're actually trying to point to an element. - if (decl_ty.isArrayOrVector()) { + if (decl_ty.isArrayOrVector(mod)) { const decl_elem_ty = decl_ty.childType(); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ @@ -27894,14 +27956,14 @@ fn beginComptimePtrMutationInner( } } - if (!decl_ty.hasWellDefinedLayout()) { + if (!decl_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, .pointee = .{ .bad_decl_ty = {} }, .ty = decl_ty, }; } - if (!ptr_elem_ty.hasWellDefinedLayout()) { + if (!ptr_elem_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, .pointee = .{ .bad_ptr_ty = {} }, @@ -27951,6 +28013,7 @@ fn beginComptimePtrLoad( ptr_val: Value, maybe_array_ty: ?Type, ) ComptimePtrLoadError!ComptimePtrLoadKit { + const mod = sema.mod; const target = sema.mod.getTarget(); var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) { .decl_ref, @@ -27966,7 +28029,7 @@ fn beginComptimePtrLoad( const decl_tv = try decl.typedValue(); if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; - const layout_defined = decl.ty.hasWellDefinedLayout(); + const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, @@ -27988,7 +28051,7 @@ fn beginComptimePtrLoad( } if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout()) { + if (elem_ty.hasWellDefinedLayout(mod)) { if (deref.parent) |*parent| { // Update the byte offset (in-place) const elem_size = try sema.typeAbiSize(elem_ty); @@ -28003,7 +28066,7 @@ fn beginComptimePtrLoad( // If we're loading an elem_ptr that was derived from a different type // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector()) x: { + const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { const deref_elem_ty = deref.pointee.?.ty.childType(); break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or (try 
sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; @@ -28018,7 +28081,7 @@ fn beginComptimePtrLoad( if (maybe_array_ty) |load_ty| { // It's possible that we're loading a [N]T, in which case we'd like to slice // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector() and load_ty.childType().eql(elem_ty, sema.mod)) { + if (load_ty.isArrayOrVector(mod) and load_ty.childType().eql(elem_ty, sema.mod)) { const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), @@ -28058,7 +28121,7 @@ fn beginComptimePtrLoad( const field_index = @intCast(u32, field_ptr.field_index); var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); - if (field_ptr.container_ty.hasWellDefinedLayout()) { + if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { const struct_ty = field_ptr.container_ty.castTag(.@"struct"); if (struct_ty != null and struct_ty.?.data.layout == .Packed) { // packed structs are not byte addressable @@ -28066,7 +28129,7 @@ fn beginComptimePtrLoad( } else if (deref.parent) |*parent| { // Update the byte offset (in-place) try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = field_ptr.container_ty.structFieldOffset(field_index, target); + const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod); parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { @@ -28103,7 +28166,7 @@ fn beginComptimePtrLoad( const field_ty = field_ptr.container_ty.structFieldType(field_index); deref.pointee = TypedValue{ .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, field_index), + .val = tv.val.fieldValue(tv.ty, mod, field_index), }; } break :blk deref; @@ -28146,7 +28209,7 @@ fn beginComptimePtrLoad( return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); }, .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull()) return sema.fail(block, src, "attempt to use null value", .{}); + if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); break :opt tv.val; }, else => unreachable, @@ -28181,7 +28244,7 @@ fn beginComptimePtrLoad( }; if (deref.pointee) |tv| { - if (deref.parent == null and tv.ty.hasWellDefinedLayout()) { + if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) { deref.parent = .{ .tv = tv, .byte_offset = 0 }; } } @@ -28196,15 +28259,15 @@ fn bitCast( inst_src: LazySrcLoc, operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); try sema.resolveTypeLayout(dest_ty); const old_ty = try sema.resolveTypeFields(sema.typeOf(inst)); try sema.resolveTypeLayout(old_ty); - const target = sema.mod.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const old_bits = old_ty.bitSize(target); + const dest_bits = dest_ty.bitSize(mod); + const old_bits = old_ty.bitSize(mod); if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ @@ -28233,20 +28296,20 @@ fn bitCastVal( new_ty: Type, buffer_offset: usize, ) !?Value { - const target = sema.mod.getTarget(); - if (old_ty.eql(new_ty, sema.mod)) return val; + const mod = sema.mod; + if (old_ty.eql(new_ty, 
mod)) return val; // For types with well-defined memory layouts, we serialize them a byte buffer, // then deserialize to the new type. - const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) { + val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) { error.ReinterpretDeclRef => return null, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}), }; - return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena); + return try Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena); } fn coerceArrayPtrToSlice( @@ -28272,7 +28335,8 @@ fn coerceArrayPtrToSlice( fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool { const dest_info = dest_ty.ptrInfo().data; const inst_info = inst_ty.ptrInfo().data; - const len0 = (inst_info.pointee_type.zigTypeTag() == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or + const mod = sema.mod; + const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0); @@ -28298,17 +28362,16 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul } if (inst_info.@"align" == 0 and dest_info.@"align" == 0) return true; if (len0) return true; - const target = sema.mod.getTarget(); const inst_align = if (inst_info.@"align" != 0) inst_info.@"align" else - inst_info.pointee_type.abiAlignment(target); + inst_info.pointee_type.abiAlignment(mod); const dest_align = if (dest_info.@"align" != 0) dest_info.@"align" else - dest_info.pointee_type.abiAlignment(target); + dest_info.pointee_type.abiAlignment(mod); if (dest_align > inst_align) { in_memory_result.* = .{ .ptr_alignment = .{ @@ -28327,18 +28390,19 @@ fn coerceCompatiblePtrs( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); if (try sema.resolveMaybeUndefVal(inst)) |val| { - if (!val.isUndef() and val.isNull() and !dest_ty.isAllowzeroPtr()) { + if (!val.isUndef() and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } // The comptime Value representation is compatible with both types. 
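// Elaboration, assuming ordinary Zig semantics (not code from this patch):
// in-memory-compatible pointer types share one value representation, so the
// cast below reuses the comptime value unchanged and only the static type
// differs; the same holds for simple qualifier changes at runtime.
const std = @import("std");
test "compatible pointer cast sketch" {
    var x: u32 = 5;
    const p: *u32 = &x;
    const q: *const u32 = p; // same representation, new type
    try std.testing.expectEqual(@as(u32, 5), q.*);
}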
return sema.addConstant(dest_ty, val); } try sema.requireRuntimeBlock(block, inst_src, null); - const inst_allows_zero = inst_ty.zigTypeTag() != .Pointer or inst_ty.ptrAllowsZero(); - if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero() and - (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn)) + const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod); + if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and + (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { const actual_ptr = if (inst_ty.isSlice()) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) @@ -28364,6 +28428,7 @@ fn coerceEnumToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); const tag_ty = union_ty.unionTagType() orelse { @@ -28396,7 +28461,7 @@ fn coerceEnumToUnion( const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = union_obj.fields.values()[field_index]; const field_ty = try sema.resolveTypeFields(field.ty); - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); @@ -28449,7 +28514,7 @@ fn coerceEnumToUnion( errdefer if (msg) |some| some.destroy(sema.gpa); for (union_obj.fields.values(), 0..) |field, i| { - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { const err_msg = msg orelse try sema.errMsg( block, inst_src, @@ -28469,7 +28534,7 @@ fn coerceEnumToUnion( } // If the union has all fields 0 bits, the union value is just the enum value. 
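// A user-level view of that case, assuming ordinary Zig semantics (not code
// from this patch): when every union field is zero-bit, the union's runtime
// representation is exactly its enum tag, so coercing the tag is a bitcast.
const std = @import("std");
test "all-zero-bit union sketch" {
    const U = union(enum) { a: void, b: void };
    const u = U{ .a = {} };
    try std.testing.expect(@sizeOf(U) == @sizeOf(@typeInfo(U).Union.tag_type.?));
    try std.testing.expect(u == .a);
}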
- if (union_ty.unionHasAllZeroBitFieldTypes()) { + if (union_ty.unionHasAllZeroBitFieldTypes(mod)) { return block.addBitCast(union_ty, enum_tag); } @@ -28487,7 +28552,7 @@ fn coerceEnumToUnion( while (it.next()) |field| : (field_index += 1) { const field_name = field.key_ptr.*; const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; + if (!(try sema.typeHasRuntimeBits(field_ty))) continue; try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) }); } try sema.addDeclaredHereNote(msg, union_ty); @@ -29066,12 +29131,13 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo } fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); const tv = try decl.typedValue(); - if (tv.ty.zigTypeTag() != .Fn) return; + if (tv.ty.zigTypeTag(mod) != .Fn) return; if (!try sema.fnHasRuntimeBits(tv.ty)) return; const func = tv.val.castTag(.function) orelse return; // undef or extern_fn - try sema.mod.ensureFuncBodyAnalysisQueued(func.data); + try mod.ensureFuncBodyAnalysisQueued(func.data); } fn analyzeRef( @@ -29124,8 +29190,9 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - const elem_ty = switch (ptr_ty.zigTypeTag()) { + const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ty.childType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; @@ -29196,12 +29263,13 @@ fn analyzeIsNull( operand: Air.Inst.Ref, invert_logic: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const result_ty = Type.bool; if (try sema.resolveMaybeUndefVal(operand)) |opt_val| { if (opt_val.isUndef()) { return sema.addConstUndef(result_ty); } - const is_null = opt_val.isNull(); + const is_null = opt_val.isNull(mod); const bool_value = if (invert_logic) !is_null else is_null; if (bool_value) { return Air.Inst.Ref.bool_true; @@ -29213,10 +29281,10 @@ fn analyzeIsNull( const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; const operand_ty = sema.typeOf(operand); var buf: Type.Payload.ElemType = undefined; - if (operand_ty.zigTypeTag() == .Optional and operand_ty.optionalChild(&buf).zigTypeTag() == .NoReturn) { + if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(&buf).zigTypeTag(mod) == .NoReturn) { return inverted_non_null_res; } - if (operand_ty.zigTypeTag() != .Optional and !operand_ty.isPtrLikeOptional()) { + if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) { return inverted_non_null_res; } try sema.requireRuntimeBlock(block, src, null); @@ -29230,11 +29298,12 @@ fn analyzePtrIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(operand); - assert(ptr_ty.zigTypeTag() == .Pointer); + assert(ptr_ty.zigTypeTag(mod) == .Pointer); const child_ty = ptr_ty.childType(); - const child_tag = child_ty.zigTypeTag(); + const child_tag = child_ty.zigTypeTag(mod); if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true; if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false; assert(child_tag == .ErrorUnion); @@ -29251,14 +29320,15 @@ fn analyzeIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) 
CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - const ot = operand_ty.zigTypeTag(); + const ot = operand_ty.zigTypeTag(mod); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); const payload_ty = operand_ty.errorUnionPayload(); - if (payload_ty.zigTypeTag() == .NoReturn) { + if (payload_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } @@ -29375,22 +29445,21 @@ fn analyzeSlice( end_src: LazySrcLoc, by_length: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; // Slice expressions can operate on a variable whose type is an array. This requires // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. const ptr_ptr_ty = sema.typeOf(ptr_ptr); - const target = sema.mod.getTarget(); - const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) { + const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ptr_ty.elemType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}), }; - const mod = sema.mod; var array_ty = ptr_ptr_child_ty; var slice_ty = ptr_ptr_ty; var ptr_or_slice = ptr_ptr; var elem_ty: Type = undefined; var ptr_sentinel: ?Value = null; - switch (ptr_ptr_child_ty.zigTypeTag()) { + switch (ptr_ptr_child_ty.zigTypeTag(mod)) { .Array => { ptr_sentinel = ptr_ptr_child_ty.sentinel(); elem_ty = ptr_ptr_child_ty.childType(); @@ -29398,7 +29467,7 @@ fn analyzeSlice( .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { .One => { const double_child_ty = ptr_ptr_child_ty.childType(); - if (double_child_ty.zigTypeTag() == .Array) { + if (double_child_ty.zigTypeTag(mod) == .Array) { ptr_sentinel = double_child_ty.sentinel(); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; @@ -29417,7 +29486,7 @@ fn analyzeSlice( if (ptr_ptr_child_ty.ptrSize() == .C) { if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| { - if (ptr_val.isNull()) { + if (ptr_val.isNull(mod)) { return sema.fail(block, src, "slice of null pointer", .{}); } } @@ -29448,7 +29517,7 @@ fn analyzeSlice( // we might learn of the length because it is a comptime-known slice value. 
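// A user-level consequence of that logic, assuming ordinary Zig semantics
// (not code from this patch): when the bounds, and therefore the length, are
// comptime-known, slicing yields a pointer to an array of known length rather
// than a runtime slice.
const std = @import("std");
test "comptime-known slice bounds sketch" {
    var arr = [_]u8{ 1, 2, 3, 4 };
    const p = arr[1..3];
    comptime std.debug.assert(@TypeOf(p) == *[2]u8);
    try std.testing.expectEqual(@as(u8, 2), p[0]);
}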
var end_is_len = uncasted_end_opt == .none; const end = e: { - if (array_ty.zigTypeTag() == .Array) { + if (array_ty.zigTypeTag(mod) == .Array) { const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()); if (!end_is_len) { @@ -29587,8 +29656,8 @@ fn analyzeSlice( } if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: { const expected_sentinel = sentinel orelse break :sentinel_check; - const start_int = start_val.getUnsignedInt(sema.mod.getTarget()).?; - const end_int = end_val.getUnsignedInt(sema.mod.getTarget()).?; + const start_int = start_val.getUnsignedInt(mod).?; + const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod); @@ -29641,7 +29710,7 @@ fn analyzeSlice( const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = new_len_val.toUnsignedInt(target); + const new_len_int = new_len_val.toUnsignedInt(mod); const return_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, mod), @@ -29724,7 +29793,7 @@ fn analyzeSlice( } // requirement: end <= len - const opt_len_inst = if (array_ty.zigTypeTag() == .Array) + const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) else if (slice_ty.isSlice()) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { @@ -29778,14 +29847,15 @@ fn cmpNumeric( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); - assert(lhs_ty.isNumeric()); - assert(rhs_ty.isNumeric()); + assert(lhs_ty.isNumeric(mod)); + assert(rhs_ty.isNumeric(mod)); - const lhs_ty_tag = lhs_ty.zigTypeTag(); - const rhs_ty_tag = rhs_ty.zigTypeTag(); + const lhs_ty_tag = lhs_ty.zigTypeTag(mod); + const rhs_ty_tag = rhs_ty.zigTypeTag(mod); const target = sema.mod.getTarget(); // One exception to heterogeneous comparison: comptime_float needs to @@ -29805,14 +29875,14 @@ fn cmpNumeric( if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { // Compare ints: const vs. 
undefined (or vice versa) - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt() and rhs_val.isUndef()) { + if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef()) { try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } - } else if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt() and lhs_val.isUndef()) { + } else if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef()) { try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29827,16 +29897,16 @@ fn cmpNumeric( return Air.Inst.Ref.bool_false; } } - if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, target, sema)) { + if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } else { - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt()) { + if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. var try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29844,10 +29914,10 @@ fn cmpNumeric( } } else { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt()) { + if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. 
const try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29901,11 +29971,11 @@ fn cmpNumeric( const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt()); + (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt()); + (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; @@ -29926,7 +29996,7 @@ fn cmpNumeric( .lt, .lte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, }; if (!rhs_is_signed) { - switch (lhs_val.orderAgainstZero()) { + switch (lhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // LHS = 0, RHS is unsigned .lte => return Air.Inst.Ref.bool_true, @@ -29959,13 +30029,13 @@ fn cmpNumeric( } lhs_bits = bigint.toConst().bitCountTwosComp(); } else { - lhs_bits = lhs_val.intBitCountTwosComp(target); + lhs_bits = lhs_val.intBitCountTwosComp(mod); } lhs_bits += @boolToInt(!lhs_is_signed and dest_int_is_signed); } else if (lhs_is_float) { dest_float_type = lhs_ty; } else { - const int_info = lhs_ty.intInfo(target); + const int_info = lhs_ty.intInfo(mod); lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -29985,7 +30055,7 @@ fn cmpNumeric( .lt, .lte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, }; if (!lhs_is_signed) { - switch (rhs_val.orderAgainstZero()) { + switch (rhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // RHS = 0, LHS is unsigned .gte => return Air.Inst.Ref.bool_true, @@ -30018,13 +30088,13 @@ fn cmpNumeric( } rhs_bits = bigint.toConst().bitCountTwosComp(); } else { - rhs_bits = rhs_val.intBitCountTwosComp(target); + rhs_bits = rhs_val.intBitCountTwosComp(mod); } rhs_bits += @boolToInt(!rhs_is_signed and dest_int_is_signed); } else if (rhs_is_float) { dest_float_type = rhs_ty; } else { - const int_info = rhs_ty.intInfo(target); + const int_info = rhs_ty.intInfo(mod); rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -30032,7 +30102,7 @@ fn cmpNumeric( const max_bits = std.math.max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; - break :blk try Module.makeIntType(sema.arena, signedness, casted_bits); + break :blk try mod.intType(signedness, casted_bits); }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); @@ -30040,13 +30110,20 @@ fn cmpNumeric( return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs); } -/// Asserts that LHS value is an int or comptime int and not undefined, and that RHS type is an int. 
-/// Given a const LHS and an unknown RHS, attempt to determine whether `op` has a guaranteed result. +/// Asserts that LHS value is an int or comptime int and not undefined, and +/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to +/// determine whether `op` has a guaranteed result. /// If it cannot be determined, returns null. /// Otherwise returns a bool for the guaranteed comparison operation. -fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type) ?bool { - const rhs_info = rhs_ty.intInfo(target); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(sema) catch unreachable; +fn compareIntsOnlyPossibleResult( + sema: *Sema, + lhs_val: Value, + op: std.math.CompareOperator, + rhs_ty: Type, +) Allocator.Error!?bool { + const mod = sema.mod; + const rhs_info = rhs_ty.intInfo(mod); + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -30078,7 +30155,7 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value }; const sign_adj = @boolToInt(!is_negative and rhs_info.signedness == .signed); - const req_bits = lhs_val.intBitCountTwosComp(target) + sign_adj; + const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj; // No sized type can have more than 65535 bits. // The RHS type operand is either a runtime value or sized (but undefined) constant. @@ -30111,12 +30188,11 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value .max = false, }; - var ty_buffer: Type.Payload.Bits = .{ - .base = .{ .tag = if (is_negative) .int_signed else .int_unsigned }, - .data = @intCast(u16, req_bits), - }; - const ty = Type.initPayload(&ty_buffer.base); - const pop_count = lhs_val.popCount(ty, target); + const ty = try mod.intType( + if (is_negative) .signed else .unsigned, + @intCast(u16, req_bits), + ); + const pop_count = lhs_val.popCount(ty, mod); if (is_negative) { break :edge .{ @@ -30152,10 +30228,11 @@ fn cmpVector( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - assert(lhs_ty.zigTypeTag() == .Vector); - assert(rhs_ty.zigTypeTag() == .Vector); + assert(lhs_ty.zigTypeTag(mod) == .Vector); + assert(rhs_ty.zigTypeTag(mod) == .Vector); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } }); @@ -30296,16 +30373,17 @@ fn resolvePeerTypes( instructions: []const Air.Inst.Ref, candidate_srcs: Module.PeerTypeCandidateSrc, ) !Type { + const mod = sema.mod; switch (instructions.len) { 0 => return Type.initTag(.noreturn), 1 => return sema.typeOf(instructions[0]), else => {}, } - const target = sema.mod.getTarget(); + const target = mod.getTarget(); var chosen = instructions[0]; - // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(). + // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(mod). 
// * ErrorSet: this is an override // * ErrorUnion: this is an override of the error set only // * other: at the end we make an ErrorUnion with the other thing and this @@ -30318,8 +30396,8 @@ fn resolvePeerTypes( const candidate_ty = sema.typeOf(candidate); const chosen_ty = sema.typeOf(chosen); - const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(); - const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(); + const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(mod); + const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(mod); // If the candidate can coerce into our chosen type, we're done. // If the chosen type can coerce into the candidate, use that. @@ -30347,8 +30425,8 @@ fn resolvePeerTypes( continue; }, .Int => { - const chosen_info = chosen_ty.intInfo(target); - const candidate_info = candidate_ty.intInfo(target); + const chosen_info = chosen_ty.intInfo(mod); + const candidate_info = candidate_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; @@ -30537,7 +30615,7 @@ fn resolvePeerTypes( // *[N]T to []T if ((cand_info.size == .Many or cand_info.size == .Slice) and chosen_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` convert_to_slice = false; @@ -30546,7 +30624,7 @@ fn resolvePeerTypes( continue; } if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` @@ -30559,8 +30637,8 @@ fn resolvePeerTypes( // Keep the one whose element type can be coerced into. if (chosen_info.size == .One and cand_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array and - cand_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array) { const chosen_elem_ty = chosen_info.pointee_type.childType(); const cand_elem_ty = cand_info.pointee_type.childType(); @@ -30631,7 +30709,7 @@ fn resolvePeerTypes( .Optional => { var opt_child_buf: Type.Payload.ElemType = undefined; const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { const chosen_info = chosen_ptr_ty.ptrInfo().data; seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30639,7 +30717,7 @@ fn resolvePeerTypes( // *[N]T to ?![*]T // *[N]T to ?![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30648,7 +30726,7 @@ fn resolvePeerTypes( }, .ErrorUnion => { const chosen_ptr_ty = chosen_ty.errorUnionPayload(); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { const chosen_info = chosen_ptr_ty.ptrInfo().data; seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30656,7 +30734,7 @@ fn resolvePeerTypes( // *[N]T to E![*]T // *[N]T to E![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30664,7 +30742,7 @@ fn resolvePeerTypes( } }, .Fn => { - if (!cand_info.mutable 
and cand_info.pointee_type.zigTypeTag() == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { + if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag(mod) == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { chosen = candidate; chosen_i = candidate_i + 1; continue; @@ -30697,16 +30775,16 @@ fn resolvePeerTypes( const chosen_child_ty = chosen_ty.childType(); const candidate_child_ty = candidate_ty.childType(); - if (chosen_child_ty.zigTypeTag() == .Int and candidate_child_ty.zigTypeTag() == .Int) { - const chosen_info = chosen_child_ty.intInfo(target); - const candidate_info = candidate_child_ty.intInfo(target); + if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { + const chosen_info = chosen_child_ty.intInfo(mod); + const candidate_info = candidate_child_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; chosen_i = candidate_i + 1; } continue; } - if (chosen_child_ty.zigTypeTag() == .Float and candidate_child_ty.zigTypeTag() == .Float) { + if (chosen_child_ty.zigTypeTag(mod) == .Float and candidate_child_ty.zigTypeTag(mod) == .Float) { if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { chosen = candidate; chosen_i = candidate_i + 1; @@ -30725,7 +30803,7 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag() == .Fn) { + .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) { if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { continue; } @@ -30790,27 +30868,27 @@ fn resolvePeerTypes( // the source locations. 
const chosen_src = candidate_srcs.resolve( sema.gpa, - sema.mod.declPtr(block.src_decl), + mod.declPtr(block.src_decl), chosen_i, ); const candidate_src = candidate_srcs.resolve( sema.gpa, - sema.mod.declPtr(block.src_decl), + mod.declPtr(block.src_decl), candidate_i + 1, ); const msg = msg: { const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ - chosen_ty.fmt(sema.mod), - candidate_ty.fmt(sema.mod), + chosen_ty.fmt(mod), + candidate_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (chosen_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(mod)}); if (candidate_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(mod)}); break :msg msg; }; @@ -30826,72 +30904,73 @@ fn resolvePeerTypes( info.data.sentinel = chosen_child_ty.sentinel(); info.data.size = .Slice; info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr()); - info.data.pointee_type = chosen_child_ty.elemType2(); + info.data.pointee_type = chosen_child_ty.elemType2(mod); - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); } if (seen_const) { // turn []T => []const T - switch (chosen_ty.zigTypeTag()) { + switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { const ptr_ty = chosen_ty.errorUnionPayload(); var info = ptr_ty.ptrInfo(); info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, .Pointer => { var info = chosen_ty.ptrInfo(); info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, else => return chosen_ty, } } if (any_are_null) { - const opt_ty = switch (chosen_ty.zigTypeTag()) { + const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { .Null, .Optional => chosen_ty, else => try Type.optional(sema.arena, chosen_ty), }; const set_ty = err_set_ty orelse return opt_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod); } - if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag()) { + if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) { .ErrorSet => return ty, .ErrorUnion => { const payload_ty = chosen_ty.errorUnionPayload(); - return try Type.errorUnion(sema.arena, ty, payload_ty, sema.mod); + return try Type.errorUnion(sema.arena, 
ty, payload_ty, mod);
         },
-        else => return try Type.errorUnion(sema.arena, ty, chosen_ty, sema.mod),
+        else => return try Type.errorUnion(sema.arena, ty, chosen_ty, mod),
     };
 
     return chosen_ty;
 }
 
 pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void {
+    const mod = sema.mod;
     try sema.resolveTypeFully(fn_info.return_type);
 
-    if (sema.mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError()) {
+    if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError(mod)) {
         // Ensure the type exists so that backends can assume that.
         _ = try sema.getBuiltinType("StackTrace");
     }
@@ -30943,7 +31022,8 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void {
 }
 
 pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Struct => return sema.resolveStructLayout(ty),
         .Union => return sema.resolveUnionLayout(ty),
         .Array => {
@@ -31021,7 +31101,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
     struct_obj.status = .have_layout;
     _ = try sema.resolveTypeRequiresComptime(resolved_ty);
 
-    if (struct_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) {
+    if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) {
         const msg = try Module.ErrorMsg.create(
             sema.gpa,
             struct_obj.srcLoc(sema.mod),
@@ -31043,7 +31123,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
     };
 
     for (struct_obj.fields.values(), 0..) |field, i| {
-        optimized_order[i] = if (field.ty.hasRuntimeBits())
+        optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty))
             @intCast(u32, i)
         else
             Module.Struct.omitted_field;
@@ -31054,11 +31134,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
         sema: *Sema,
 
         fn lessThan(ctx: @This(), a: u32, b: u32) bool {
+            const m = ctx.sema.mod;
             if (a == Module.Struct.omitted_field) return false;
             if (b == Module.Struct.omitted_field) return true;
-            const target = ctx.sema.mod.getTarget();
-            return ctx.struct_obj.fields.values()[a].ty.abiAlignment(target) >
-                ctx.struct_obj.fields.values()[b].ty.abiAlignment(target);
+            return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) >
+                ctx.struct_obj.fields.values()[b].ty.abiAlignment(m);
         }
     };
     mem.sort(u32, optimized_order, AlignSortContext{
@@ -31073,11 +31153,10 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
 
 fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
     const gpa = mod.gpa;
-    const target = mod.getTarget();
 
     var fields_bit_sum: u64 = 0;
     for (struct_obj.fields.values()) |field| {
-        fields_bit_sum += field.ty.bitSize(target);
+        fields_bit_sum += field.ty.bitSize(mod);
     }
 
     const decl_index = struct_obj.owner_decl;
@@ -31178,32 +31257,29 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
            };
            return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
        }
-        var buf: Type.Payload.Bits = .{
-            .base = .{ .tag = .int_unsigned },
-            .data = @intCast(u16, fields_bit_sum),
-        };
-        struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator);
+        struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
     }
 }
 
 fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
 
-    if
(!backing_int_ty.isInt()) { + if (!backing_int_ty.isInt(mod)) { return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)}); } - if (backing_int_ty.bitSize(target) != fields_bit_sum) { + if (backing_int_ty.bitSize(mod) != fields_bit_sum) { return sema.fail( block, src, "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}", - .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum }, + .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum }, ); } } fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (!ty.isIndexable()) { + const mod = sema.mod; + if (!ty.isIndexable(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -31215,12 +31291,13 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { } fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (ty.zigTypeTag() == .Pointer) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Pointer) { switch (ty.ptrSize()) { .Slice, .Many, .C => return, .One => { const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() == .Array) return; + if (elem_ty.zigTypeTag(mod) == .Array) return; // TODO https://github.com/ziglang/zig/issues/15479 // if (elem_ty.isTuple()) return; }, @@ -31270,7 +31347,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { union_obj.status = .have_layout; _ = try sema.resolveTypeRequiresComptime(resolved_ty); - if (union_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) { + if (union_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, union_obj.srcLoc(sema.mod), @@ -31285,6 +31362,23 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // for hasRuntimeBits() of each field, so we need "requires comptime" // to be known already before this function returns. pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { + const mod = sema.mod; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + return switch (ty.tag()) { .u1, .u8, @@ -31349,8 +31443,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -31360,11 +31452,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .comptime_float, .enum_literal, .type_info, - // These are function bodies, not function pointers. 
- .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -31387,7 +31474,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); @@ -31474,7 +31561,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => { const child_ty = try sema.resolveTypeFields(ty.childType()); return sema.resolveTypeFully(child_ty); @@ -31840,7 +31928,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void type_body_len: u32 = 0, align_body_len: u32 = 0, init_body_len: u32 = 0, - type_ref: Air.Inst.Ref = .none, + type_ref: Zir.Inst.Ref = .none, }; const fields = try sema.arena.alloc(Field, fields_len); var any_inits = false; @@ -31967,7 +32055,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const field = &struct_obj.fields.values()[field_i]; field.ty = try field_ty.copy(decl_arena_allocator); - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -31981,7 +32069,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }; return sema.failWithOwnedErrorMsg(msg); } - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32010,7 +32098,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty))) { + } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32191,7 +32279,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { if (small.auto_enum_tag) { // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; - if (int_tag_ty.zigTypeTag() != .Int and int_tag_ty.zigTypeTag() != .ComptimeInt) { + if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)}); } @@ -32220,7 +32308,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } else { // The provided type is the enum tag type. union_obj.tag_ty = try provided_ty.copy(decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag() != .Enum) { + if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}); } // The fields of the union must match the enum exactly. 
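
Two mechanical patterns account for most of the Sema hunks in this commit: `Type`
and `Value` queries are now passed `mod` so they can reach the InternPool, and the
big tag switches gain an `ip_index` dispatch ahead of the legacy path. A condensed
sketch of the second pattern, with abridged prong bodies rather than the verbatim
hunk:

    pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
        const mod = sema.mod;
        // Types that are already interned dispatch on their InternPool key first.
        if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
            .int_type => return false, // fixed-width integers are never comptime-only
            .simple_value, .extern_func, .int, .enum_tag => unreachable, // values, not types
            else => @panic("TODO"), // type kinds not yet moved into the InternPool
        };
        // Everything else still takes the legacy tag-based path.
        return switch (ty.tag()) {
            .comptime_int, .comptime_float, .enum_literal => true,
            else => false, // (abridged)
        };
    }

The `@panic("TODO")` prongs mark the cases not yet moved to the InternPool.
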
@@ -32281,7 +32369,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk align_ref; } else .none; - const tag_ref: Zir.Inst.Ref = if (has_tag) blk: { + const tag_ref: Air.Inst.Ref = if (has_tag) blk: { const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); extra_index += 1; break :blk try sema.resolveInst(tag_ref); @@ -32391,7 +32479,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32420,7 +32508,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32673,6 +32761,29 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// that the types are already resolved. /// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { + const mod = sema.mod; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return Value.zero; + } else { + return null; + } + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + switch (ty.tag()) { .f16, .f32, @@ -32712,10 +32823,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set, .error_set_merged, .error_union, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, .single_const_pointer_to_comptime_int, .array_sentinel, @@ -32803,7 +32910,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const resolved_ty = try sema.resolveTypeFields(ty); const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; // An explicit tag type is always provided for enum_numbered. 
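+            // If the tag type has runtime bits, the enum can represent more than
+            // one value, so it has no single possible value.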
-            if (enum_obj.tag_ty.hasRuntimeBits()) {
+            if (try sema.typeHasRuntimeBits(enum_obj.tag_ty)) {
                 return null;
             }
             if (enum_obj.fields.count() == 1) {
@@ -32819,7 +32926,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         .enum_full => {
             const resolved_ty = try sema.resolveTypeFields(ty);
             const enum_obj = resolved_ty.castTag(.enum_full).?.data;
-            if (enum_obj.tag_ty.hasRuntimeBits()) {
+            if (try sema.typeHasRuntimeBits(enum_obj.tag_ty)) {
                 return null;
             }
             switch (enum_obj.fields.count()) {
@@ -32843,7 +32950,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         },
         .enum_nonexhaustive => {
             const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
-            if (tag_ty.zigTypeTag() != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) {
+            if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) {
                 return Value.zero;
             } else {
                 return null;
@@ -32883,13 +32990,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         .null => return Value.null,
         .undefined => return Value.initTag(.undef),
 
-        .int_unsigned, .int_signed => {
-            if (ty.cast(Type.Payload.Bits).?.data == 0) {
-                return Value.zero;
-            } else {
-                return null;
-            }
-        },
         .vector, .array, .array_u8 => {
             if (ty.arrayLen() == 0)
                 return Value.initTag(.empty_array);
@@ -32919,6 +33019,89 @@ pub fn getTmpAir(sema: Sema) Air {
 }
 
 pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
+    switch (ty.ip_index) {
+        .u1_type => return .u1_type,
+        .u8_type => return .u8_type,
+        .i8_type => return .i8_type,
+        .u16_type => return .u16_type,
+        .i16_type => return .i16_type,
+        .u29_type => return .u29_type,
+        .u32_type => return .u32_type,
+        .i32_type => return .i32_type,
+        .u64_type => return .u64_type,
+        .i64_type => return .i64_type,
+        .u80_type => return .u80_type,
+        .u128_type => return .u128_type,
+        .i128_type => return .i128_type,
+        .usize_type => return .usize_type,
+        .isize_type => return .isize_type,
+        .c_char_type => return .c_char_type,
+        .c_short_type => return .c_short_type,
+        .c_ushort_type => return .c_ushort_type,
+        .c_int_type => return .c_int_type,
+        .c_uint_type => return .c_uint_type,
+        .c_long_type => return .c_long_type,
+        .c_ulong_type => return .c_ulong_type,
+        .c_longlong_type => return .c_longlong_type,
+        .c_ulonglong_type => return .c_ulonglong_type,
+        .c_longdouble_type => return .c_longdouble_type,
+        .f16_type => return .f16_type,
+        .f32_type => return .f32_type,
+        .f64_type => return .f64_type,
+        .f80_type => return .f80_type,
+        .f128_type => return .f128_type,
+        .anyopaque_type => return .anyopaque_type,
+        .bool_type => return .bool_type,
+        .void_type => return .void_type,
+        .type_type => return .type_type,
+        .anyerror_type => return .anyerror_type,
+        .comptime_int_type => return .comptime_int_type,
+        .comptime_float_type => return .comptime_float_type,
+        .noreturn_type => return .noreturn_type,
+        .anyframe_type => return .anyframe_type,
+        .null_type => return .null_type,
+        .undefined_type => return .undefined_type,
+        .enum_literal_type => return .enum_literal_type,
+        .atomic_order_type => return .atomic_order_type,
+        .atomic_rmw_op_type => return .atomic_rmw_op_type,
+        .calling_convention_type => return .calling_convention_type,
+        .address_space_type => return .address_space_type,
+        .float_mode_type => return .float_mode_type,
+        .reduce_op_type => return .reduce_op_type,
+        .call_modifier_type => return .call_modifier_type,
+        .prefetch_options_type => return .prefetch_options_type,
+        .export_options_type => return
.export_options_type, + .extern_options_type => return .extern_options_type, + .type_info_type => return .type_info_type, + .manyptr_u8_type => return .manyptr_u8_type, + .manyptr_const_u8_type => return .manyptr_const_u8_type, + .single_const_pointer_to_comptime_int_type => return .single_const_pointer_to_comptime_int_type, + .const_slice_u8_type => return .const_slice_u8_type, + .anyerror_void_error_union_type => return .anyerror_void_error_union_type, + .generic_poison_type => return .generic_poison_type, + .var_args_param_type => return .var_args_param_type, + .empty_struct_type => return .empty_struct_type, + + // values + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .one => unreachable, + .one_usize => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + _ => {}, + + .none => unreachable, + } switch (ty.tag()) { .u1 => return .u1_type, .u8 => return .u8_type, @@ -32934,6 +33117,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .i128 => return .i128_type, .usize => return .usize_type, .isize => return .isize_type, + .c_char => return .c_char_type, .c_short => return .c_short_type, .c_ushort => return .c_ushort_type, .c_int => return .c_int_type, @@ -32966,17 +33150,13 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .address_space => return .address_space_type, .float_mode => return .float_mode_type, .reduce_op => return .reduce_op_type, - .modifier => return .modifier_type, + .modifier => return .call_modifier_type, .prefetch_options => return .prefetch_options_type, .export_options => return .export_options_type, .extern_options => return .extern_options_type, .type_info => return .type_info_type, .manyptr_u8 => return .manyptr_u8_type, .manyptr_const_u8 => return .manyptr_const_u8_type, - .fn_noreturn_no_args => return .fn_noreturn_no_args_type, - .fn_void_no_args => return .fn_void_no_args_type, - .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, .const_slice_u8 => return .const_slice_u8_type, .anyerror_void_error_union => return .anyerror_void_error_union_type, @@ -33186,7 +33366,8 @@ const DerefResult = union(enum) { }; fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { error.RuntimeLoad => return DerefResult{ .runtime_load = {} }, else => |e| return e, @@ -33211,7 +33392,7 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value // The type is not in-memory coercible or the direct dereference failed, so it must // be bitcast according to the pointer type we are performing the load through. 
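+    // Bitcasting is only possible once the memory layout is fully known, hence
+    // the check below.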
- if (!load_ty.hasWellDefinedLayout()) { + if (!load_ty.hasWellDefinedLayout(mod)) { return DerefResult{ .needed_well_defined = load_ty }; } @@ -33253,6 +33434,7 @@ fn typePtrOrOptionalPtrTy( ty: Type, buf: *Type.Payload.ElemType, ) !?Type { + const mod = sema.mod; switch (ty.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -33281,7 +33463,7 @@ fn typePtrOrOptionalPtrTy( .optional => { const child_type = ty.optionalChild(buf); - if (child_type.zigTypeTag() != .Pointer) return null; + if (child_type.zigTypeTag(mod) != .Pointer) return null; const info = child_type.ptrInfo().data; switch (info.size) { @@ -33310,6 +33492,23 @@ fn typePtrOrOptionalPtrTy( /// TODO merge these implementations together with the "advanced"/opt_sema pattern seen /// elsewhere in value.zig pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { + const mod = sema.mod; + if (ty.ip_index != .none) { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + } + } return switch (ty.tag()) { .u1, .u8, @@ -33374,8 +33573,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -33385,11 +33582,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .comptime_float, .enum_literal, .type_info, - // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -33412,7 +33604,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { return sema.typeRequiresComptime(child_ty); @@ -33504,7 +33696,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.hasRuntimeBitsAdvanced(false, .{ .sema = sema }) catch |err| switch (err) { + const mod = sema.mod; + return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; @@ -33512,19 +33705,18 @@ pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { fn typeAbiSize(sema: *Sema, ty: Type) !u64 { try sema.resolveTypeLayout(ty); - const target = sema.mod.getTarget(); - return ty.abiSize(target); + return ty.abiSize(sema.mod); } fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 { - const target = sema.mod.getTarget(); - return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar; + return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; } /// Not valid to call for packed unions. /// Keep implementation in sync with `Module.Union.Field.normalAlignment`. 
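+/// A `NoReturn` field occupies no storage, so its alignment is reported as 0.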
fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { - if (field.ty.zigTypeTag() == .NoReturn) { + const mod = sema.mod; + if (field.ty.zigTypeTag(mod) == .NoReturn) { return @as(u32, 0); } else if (field.abi_align == 0) { return sema.typeAbiAlignment(field.ty); @@ -33605,13 +33797,14 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { } fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -33620,13 +33813,13 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { } fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { + const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const target = sema.mod.getTarget(); - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -33645,7 +33838,8 @@ fn numberAddWrapScalar( ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intAdd(lhs, rhs, ty); } @@ -33663,7 +33857,8 @@ fn intSub( rhs: Value, ty: Type, ) !Value { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; @@ -33678,13 +33873,13 @@ fn intSub( } fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { + const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
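+    // Like intAddScalar above, this threads `mod` through toBigIntAdvanced so
+    // that lazy values (such as `lazy_align`) can be resolved during conversion.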
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const target = sema.mod.getTarget(); - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -33703,7 +33898,8 @@ fn numberSubWrapScalar( ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intSub(lhs, rhs, ty); } @@ -33721,14 +33917,15 @@ fn floatAdd( rhs: Value, float_type: Type, ) !Value { - if (float_type.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType()); + scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33778,14 +33975,15 @@ fn floatSub( rhs: Value, float_type: Type, ) !Value { - if (float_type.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType()); + scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33835,7 +34033,8 @@ fn intSubWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -33843,7 +34042,7 @@ fn intSubWithOverflow( var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); + const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -33861,13 +34060,13 @@ fn intSubWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const target = sema.mod.getTarget(); - const info = ty.intInfo(target); + const mod = sema.mod; + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -33889,13 +34088,14 @@ fn floatToInt( float_ty: Type, int_ty: Type, ) CompileError!Value { - if (float_ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_ty.zigTypeTag(mod) == .Vector) { const elem_ty = float_ty.childType(); const result_data = try sema.arena.alloc(Value, float_ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(sema.mod, i, &buf); - scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType()); + scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33976,7 +34176,8 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); switch (val.tag()) { .zero, .undef, @@ -33985,9 +34186,9 @@ fn intFitsInType( .one, .bool_true, - => switch (ty.zigTypeTag()) { + => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return switch (info.signedness) { .signed => info.bits >= 2, .unsigned => info.bits >= 1, @@ -33997,9 +34198,9 @@ fn intFitsInType( else => unreachable, }, - .lazy_align => switch (ty.zigTypeTag()) { + .lazy_align => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); // If it is u16 or bigger we know the alignment fits without resolving it. if (info.bits >= max_needed_bits) return true; @@ -34011,9 +34212,9 @@ fn intFitsInType( .ComptimeInt => return true, else => unreachable, }, - .lazy_size => switch (ty.zigTypeTag()) { + .lazy_size => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); // If it is u64 or bigger we know the size fits without resolving it. 
if (info.bits >= max_needed_bits) return true; @@ -34026,41 +34227,41 @@ fn intFitsInType( else => unreachable, }, - .int_u64 => switch (ty.zigTypeTag()) { + .int_u64 => switch (ty.zigTypeTag(mod)) { .Int => { const x = val.castTag(.int_u64).?.data; if (x == 0) return true; - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); return info.bits >= needed_bits; }, .ComptimeInt => return true, else => unreachable, }, - .int_i64 => switch (ty.zigTypeTag()) { + .int_i64 => switch (ty.zigTypeTag(mod)) { .Int => { const x = val.castTag(.int_i64).?.data; if (x == 0) return true; - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.signedness == .unsigned and x < 0) return false; var buffer: Value.BigIntSpace = undefined; - return (try val.toBigIntAdvanced(&buffer, target, sema)).fitsInTwosComp(info.signedness, info.bits); + return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, else => unreachable, }, - .int_big_positive => switch (ty.zigTypeTag()) { + .int_big_positive => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, else => unreachable, }, - .int_big_negative => switch (ty.zigTypeTag()) { + .int_big_negative => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, @@ -34068,7 +34269,7 @@ fn intFitsInType( }, .the_only_possible_value => { - assert(ty.intInfo(target).bits == 0); + assert(ty.intInfo(mod).bits == 0); return true; }, @@ -34077,9 +34278,9 @@ fn intFitsInType( .decl_ref, .function, .variable, - => switch (ty.zigTypeTag()) { + => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { .signed => info.bits > ptr_bits, @@ -34091,9 +34292,9 @@ fn intFitsInType( }, .aggregate => { - assert(ty.zigTypeTag() == .Vector); + assert(ty.zigTypeTag(mod) == .Vector); for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) { + if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { if (vector_index) |some| some.* = i; return false; } @@ -34122,11 +34323,8 @@ fn intInRange( } /// Asserts the type is an enum. 
-fn enumHasInt( - sema: *Sema, - ty: Type, - int: Value, -) CompileError!bool { +fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { + const mod = sema.mod; switch (ty.tag()) { .enum_nonexhaustive => unreachable, .enum_full => { @@ -34157,11 +34355,7 @@ fn enumHasInt( const enum_simple = ty.castTag(.enum_simple).?.data; const fields_len = enum_simple.fields.count(); const bits = std.math.log2_int_ceil(usize, fields_len); - var buffer: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - const tag_ty = Type.initPayload(&buffer.base); + const tag_ty = try mod.intType(.unsigned, bits); return sema.intInRange(tag_ty, int, fields_len); }, .atomic_order, @@ -34186,7 +34380,8 @@ fn intAddWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { @@ -34194,7 +34389,7 @@ fn intAddWithOverflow( var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); + const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -34212,13 +34407,13 @@ fn intAddWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const target = sema.mod.getTarget(); - const info = ty.intInfo(target); + const mod = sema.mod; + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -34243,14 +34438,15 @@ fn compareAll( rhs: Value, ty: Type, ) CompileError!bool { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < ty.vectorLen()) : (i += 1) { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) { + if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) { return false; } } @@ -34270,7 +34466,7 @@ fn compareScalar( switch (op) { .eq => return sema.valuesEqual(lhs, rhs, ty), .neq => return !(try sema.valuesEqual(lhs, rhs, ty)), - else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod.getTarget(), sema), + else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod, sema), } } @@ -34291,14 +34487,15 @@ fn compareVector( rhs: Value, ty: Type, ) !Value { - assert(ty.zigTypeTag() == .Vector); + const mod = sema.mod; + assert(ty.zigTypeTag(mod) == .Vector); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()); + const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); scalar.* = Value.makeBool(res_bool); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -34312,10 +34509,10 @@ fn compareVector( /// Handles const-ness and address spaces in particular. /// This code is duplicated in `analyzePtrArithmetic`. fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { + const mod = sema.mod; const ptr_info = ptr_ty.ptrInfo().data; - const elem_ty = ptr_ty.elemType2(); + const elem_ty = ptr_ty.elemType2(mod); const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0; - const target = sema.mod.getTarget(); const parent_ty = ptr_ty.childType(); const VI = Type.Payload.Pointer.Data.VectorIndex; @@ -34325,14 +34522,14 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { alignment: u32 = 0, vector_index: VI = .none, } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: { - const elem_bits = elem_ty.bitSize(target); + const elem_bits = elem_ty.bitSize(mod); if (elem_bits == 0) break :blk .{}; const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); if (!is_packed) break :blk .{}; break :blk .{ .host_size = @intCast(u16, parent_ty.arrayLen()), - .alignment = @intCast(u16, parent_ty.abiAlignment(target)), + .alignment = @intCast(u16, parent_ty.abiAlignment(mod)), .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime, }; } else .{}; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index d74fbda93e8d..dc556942c3d6 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -71,7 +71,6 @@ pub fn print( level: u8, mod: *Module, ) @TypeOf(writer).Error!void { - const target = mod.getTarget(); var val = tv.val; var ty = tv.ty; if (val.isVariable(mod)) @@ -117,10 +116,6 @@ pub fn print( .noreturn_type => return writer.writeAll("noreturn"), .null_type => return writer.writeAll("@Type(.Null)"), .undefined_type => return writer.writeAll("@Type(.Undefined)"), - .fn_noreturn_no_args_type => return writer.writeAll("fn() noreturn"), - .fn_void_no_args_type => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args_type => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args_type => return writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"), .anyframe_type => return writer.writeAll("anyframe"), .const_slice_u8_type => return writer.writeAll("[]const u8"), @@ -147,7 +142,7 @@ pub fn print( if (level == 0) { return writer.writeAll(".{ ... 
}"); } - if (ty.zigTypeTag() == .Struct) { + if (ty.zigTypeTag(mod) == .Struct) { try writer.writeAll(".{"); const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); @@ -160,7 +155,7 @@ pub fn print( } try print(.{ .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, i), + .val = val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount() > max_aggregate_items) { @@ -168,7 +163,7 @@ pub fn print( } return writer.writeAll("}"); } else { - const elem_ty = ty.elemType2(); + const elem_ty = ty.elemType2(mod); const len = ty.arrayLen(); if (elem_ty.eql(Type.u8, mod)) str: { @@ -177,9 +172,9 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, i); + const elem = val.fieldValue(ty, mod, i); if (elem.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str; + buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } const truncated = if (len > max_string_len) " (truncated)" else ""; @@ -194,7 +189,7 @@ pub fn print( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, i), + .val = val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { @@ -232,25 +227,18 @@ pub fn print( .bool_true => return writer.writeAll("true"), .bool_false => return writer.writeAll("false"), .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_type => { - const int_type = val.castTag(.int_type).?.data; - return writer.print("{s}{d}", .{ - if (int_type.signed) "s" else "u", - int_type.bits, - }); - }, .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), .lazy_align => { const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(target); + const x = sub_ty.abiAlignment(mod); return writer.print("{d}", .{x}); }, .lazy_size => { const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(target); + const x = sub_ty.abiSize(mod); return writer.print("{d}", .{x}); }, .function => return writer.print("(function '{s}')", .{ @@ -315,7 +303,7 @@ pub fn print( }, writer, level - 1, mod); } - if (field_ptr.container_ty.zigTypeTag() == .Struct) { + if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { switch (field_ptr.container_ty.tag()) { .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), else => { @@ -323,7 +311,7 @@ pub fn print( return writer.print(".{s}", .{field_name}); }, } - } else if (field_ptr.container_ty.zigTypeTag() == .Union) { + } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; return writer.print(".{s}", .{field_name}); } else if (field_ptr.container_ty.isSlice()) { @@ -352,7 +340,7 @@ pub fn print( var i: u32 = 0; try writer.writeAll(".{ "); const elem_tv = TypedValue{ - .ty = ty.elemType2(), + .ty = ty.elemType2(mod), .val = val.castTag(.repeated).?.data, }; const len = ty.arrayLen(); @@ -372,7 +360,7 @@ pub fn print( } try writer.writeAll(".{ "); try print(.{ - .ty = ty.elemType2(), + .ty = ty.elemType2(mod), .val = ty.sentinel().?, }, writer, level - 1, mod); return writer.writeAll(" }"); @@ -382,8 +370,8 @@ pub fn 
print(
             return writer.writeAll(".{ ... }");
         }
         const payload = val.castTag(.slice).?.data;
-        const elem_ty = ty.elemType2();
-        const len = payload.len.toUnsignedInt(target);
+        const elem_ty = ty.elemType2(mod);
+        const len = payload.len.toUnsignedInt(mod);
 
         if (elem_ty.eql(Type.u8, mod)) str: {
             const max_len = @intCast(usize, std.math.min(len, max_string_len));
@@ -394,7 +382,7 @@ pub fn print(
                 var elem_buf: Value.ElemValueBuffer = undefined;
                 const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
                 if (elem_val.isUndef()) break :str;
-                buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str;
+                buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
             }
 
             // TODO would be nice if this had a bit of unicode awareness.
diff --git a/src/Zir.zig b/src/Zir.zig
index 2bd5b21f7913..1063377fc7de 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -19,6 +19,7 @@ const BigIntConst = std.math.big.int.Const;
 const BigIntMutable = std.math.big.int.Mutable;
 const Ast = std.zig.Ast;
 
+const InternPool = @import("InternPool.zig");
 const Zir = @This();
 const Type = @import("type.zig").Type;
 const Value = @import("value.zig").Value;
@@ -2041,448 +2042,95 @@ pub const Inst = struct {
     /// The position of a ZIR instruction within the `Zir` instructions array.
     pub const Index = u32;
 
-    /// A reference to a TypedValue or ZIR instruction.
+    /// A reference to a ZIR instruction, or to an InternPool index, or neither.
     ///
-    /// If the Ref has a tag in this enum, it refers to a TypedValue.
-    ///
-    /// If the value of a Ref does not have a tag, it refers to a ZIR instruction.
-    ///
-    /// The first values after the the last tag refer to ZIR instructions which may
-    /// be derived by subtracting `typed_value_map.len`.
-    ///
-    /// When adding a tag to this enum, consider adding a corresponding entry to
-    /// `primitives` in astgen.
+    /// If the integer tag value is < InternPool.static_len, then it
+    /// corresponds to an InternPool index. Otherwise, this refers to a ZIR
+    /// instruction.
     ///
     /// The tag type is specified so that it is safe to bitcast between `[]u32`
     /// and `[]Ref`.
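+    /// A Ref that refers to a ZIR instruction stores
+    /// `InternPool.static_len + zir_index` (see `indexToRef` below), and `none`
+    /// is reserved as `std.math.maxInt(u32)`.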
pub const Ref = enum(u32) { + u1_type = @enumToInt(InternPool.Index.u1_type), + u8_type = @enumToInt(InternPool.Index.u8_type), + i8_type = @enumToInt(InternPool.Index.i8_type), + u16_type = @enumToInt(InternPool.Index.u16_type), + i16_type = @enumToInt(InternPool.Index.i16_type), + u29_type = @enumToInt(InternPool.Index.u29_type), + u32_type = @enumToInt(InternPool.Index.u32_type), + i32_type = @enumToInt(InternPool.Index.i32_type), + u64_type = @enumToInt(InternPool.Index.u64_type), + i64_type = @enumToInt(InternPool.Index.i64_type), + u80_type = @enumToInt(InternPool.Index.u80_type), + u128_type = @enumToInt(InternPool.Index.u128_type), + i128_type = @enumToInt(InternPool.Index.i128_type), + usize_type = @enumToInt(InternPool.Index.usize_type), + isize_type = @enumToInt(InternPool.Index.isize_type), + c_char_type = @enumToInt(InternPool.Index.c_char_type), + c_short_type = @enumToInt(InternPool.Index.c_short_type), + c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type), + c_int_type = @enumToInt(InternPool.Index.c_int_type), + c_uint_type = @enumToInt(InternPool.Index.c_uint_type), + c_long_type = @enumToInt(InternPool.Index.c_long_type), + c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type), + c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type), + c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type), + c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type), + f16_type = @enumToInt(InternPool.Index.f16_type), + f32_type = @enumToInt(InternPool.Index.f32_type), + f64_type = @enumToInt(InternPool.Index.f64_type), + f80_type = @enumToInt(InternPool.Index.f80_type), + f128_type = @enumToInt(InternPool.Index.f128_type), + anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type), + bool_type = @enumToInt(InternPool.Index.bool_type), + void_type = @enumToInt(InternPool.Index.void_type), + type_type = @enumToInt(InternPool.Index.type_type), + anyerror_type = @enumToInt(InternPool.Index.anyerror_type), + comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type), + comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type), + noreturn_type = @enumToInt(InternPool.Index.noreturn_type), + anyframe_type = @enumToInt(InternPool.Index.anyframe_type), + null_type = @enumToInt(InternPool.Index.null_type), + undefined_type = @enumToInt(InternPool.Index.undefined_type), + enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type), + atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type), + atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type), + calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type), + address_space_type = @enumToInt(InternPool.Index.address_space_type), + float_mode_type = @enumToInt(InternPool.Index.float_mode_type), + reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type), + call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type), + prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type), + export_options_type = @enumToInt(InternPool.Index.export_options_type), + extern_options_type = @enumToInt(InternPool.Index.extern_options_type), + type_info_type = @enumToInt(InternPool.Index.type_info_type), + manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), + manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), + const_slice_u8_type = 
@enumToInt(InternPool.Index.const_slice_u8_type), + anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), + generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), + empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), + undef = @enumToInt(InternPool.Index.undef), + zero = @enumToInt(InternPool.Index.zero), + zero_usize = @enumToInt(InternPool.Index.zero_usize), + one = @enumToInt(InternPool.Index.one), + one_usize = @enumToInt(InternPool.Index.one_usize), + calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), + calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), + void_value = @enumToInt(InternPool.Index.void_value), + unreachable_value = @enumToInt(InternPool.Index.unreachable_value), + null_value = @enumToInt(InternPool.Index.null_value), + bool_true = @enumToInt(InternPool.Index.bool_true), + bool_false = @enumToInt(InternPool.Index.bool_false), + empty_struct = @enumToInt(InternPool.Index.empty_struct), + generic_poison = @enumToInt(InternPool.Index.generic_poison), + /// This Ref does not correspond to any ZIR instruction or constant /// value and may instead be used as a sentinel to indicate null. - none, - - u1_type, - u8_type, - i8_type, - u16_type, - i16_type, - u29_type, - u32_type, - i32_type, - u64_type, - i64_type, - u128_type, - i128_type, - usize_type, - isize_type, - c_char_type, - c_short_type, - c_ushort_type, - c_int_type, - c_uint_type, - c_long_type, - c_ulong_type, - c_longlong_type, - c_ulonglong_type, - c_longdouble_type, - f16_type, - f32_type, - f64_type, - f80_type, - f128_type, - anyopaque_type, - bool_type, - void_type, - type_type, - anyerror_type, - comptime_int_type, - comptime_float_type, - noreturn_type, - anyframe_type, - null_type, - undefined_type, - enum_literal_type, - atomic_order_type, - atomic_rmw_op_type, - calling_convention_type, - address_space_type, - float_mode_type, - reduce_op_type, - modifier_type, - prefetch_options_type, - export_options_type, - extern_options_type, - type_info_type, - manyptr_u8_type, - manyptr_const_u8_type, - fn_noreturn_no_args_type, - fn_void_no_args_type, - fn_naked_noreturn_no_args_type, - fn_ccc_void_no_args_type, - single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - anyerror_void_error_union_type, - generic_poison_type, - - /// `undefined` (untyped) - undef, - /// `0` (comptime_int) - zero, - /// `1` (comptime_int) - one, - /// `{}` - void_value, - /// `unreachable` (noreturn type) - unreachable_value, - /// `null` (untyped) - null_value, - /// `true` - bool_true, - /// `false` - bool_false, - /// `.{}` (untyped) - empty_struct, - /// `0` (usize) - zero_usize, - /// `1` (usize) - one_usize, - /// `std.builtin.CallingConvention.C` - calling_convention_c, - /// `std.builtin.CallingConvention.Inline` - calling_convention_inline, - /// Used for generic parameters where the type and value - /// is not known until generic function instantiation. - generic_poison, - /// This is a special type for variadic parameters of a function call. - /// Casts to it will validate that the type can be passed to a c - /// calling convention function. 
-        var_args_param,
-
+        none = std.math.maxInt(u32),
         _,
-
-        pub const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{
-            .none = undefined,
-
-            .u1_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u1_type),
-            },
-            .u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u8_type),
-            },
-            .i8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i8_type),
-            },
-            .u16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u16_type),
-            },
-            .i16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i16_type),
-            },
-            .u29_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u29_type),
-            },
-            .u32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u32_type),
-            },
-            .i32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i32_type),
-            },
-            .u64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u64_type),
-            },
-            .i64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i64_type),
-            },
-            .u128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.u128_type),
-            },
-            .i128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.i128_type),
-            },
-            .usize_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.usize_type),
-            },
-            .isize_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.isize_type),
-            },
-            .c_char_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_char_type),
-            },
-            .c_short_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_short_type),
-            },
-            .c_ushort_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ushort_type),
-            },
-            .c_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_int_type),
-            },
-            .c_uint_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_uint_type),
-            },
-            .c_long_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_long_type),
-            },
-            .c_ulong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ulong_type),
-            },
-            .c_longlong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_longlong_type),
-            },
-            .c_ulonglong_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_ulonglong_type),
-            },
-            .c_longdouble_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.c_longdouble_type),
-            },
-            .f16_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f16_type),
-            },
-            .f32_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f32_type),
-            },
-            .f64_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f64_type),
-            },
-            .f80_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f80_type),
-            },
-            .f128_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.f128_type),
-            },
-            .anyopaque_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyopaque_type),
-            },
-            .bool_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.bool_type),
-            },
-            .void_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.void_type),
-            },
-            .type_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.type_type),
-            },
-            .anyerror_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyerror_type),
-            },
-            .comptime_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.comptime_int_type),
-            },
-            .comptime_float_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.comptime_float_type),
-            },
-            .noreturn_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.noreturn_type),
-            },
-            .anyframe_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyframe_type),
-            },
-            .null_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.null_type),
-            },
-            .undefined_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.undefined_type),
-            },
-            .fn_noreturn_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_noreturn_no_args_type),
-            },
-            .fn_void_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_void_no_args_type),
-            },
-            .fn_naked_noreturn_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_naked_noreturn_no_args_type),
-            },
-            .fn_ccc_void_no_args_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.fn_ccc_void_no_args_type),
-            },
-            .single_const_pointer_to_comptime_int_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.single_const_pointer_to_comptime_int_type),
-            },
-            .const_slice_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.const_slice_u8_type),
-            },
-            .anyerror_void_error_union_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.anyerror_void_error_union_type),
-            },
-            .generic_poison_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.generic_poison_type),
-            },
-            .enum_literal_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.enum_literal_type),
-            },
-            .manyptr_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.manyptr_u8_type),
-            },
-            .manyptr_const_u8_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.manyptr_const_u8_type),
-            },
-            .atomic_order_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.atomic_order_type),
-            },
-            .atomic_rmw_op_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.atomic_rmw_op_type),
-            },
-            .calling_convention_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.calling_convention_type),
-            },
-            .address_space_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.address_space_type),
-            },
-            .float_mode_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.float_mode_type),
-            },
-            .reduce_op_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.reduce_op_type),
-            },
-            .modifier_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.modifier_type),
-            },
-            .prefetch_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.prefetch_options_type),
-            },
-            .export_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.export_options_type),
-            },
-            .extern_options_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.extern_options_type),
-            },
-            .type_info_type = .{
-                .ty = Type.initTag(.type),
-                .val = Value.initTag(.type_info_type),
-            },
-
-            .undef = .{
-                .ty = Type.initTag(.undefined),
-                .val = Value.initTag(.undef),
-            },
-            .zero = .{
-                .ty = Type.initTag(.comptime_int),
-                .val = Value.initTag(.zero),
-            },
-            .zero_usize = .{
-                .ty = Type.initTag(.usize),
-                .val = Value.initTag(.zero),
-            },
-            .one = .{
-                .ty = Type.initTag(.comptime_int),
-                .val = Value.initTag(.one),
-            },
-            .one_usize = .{
-                .ty = Type.initTag(.usize),
-                .val = Value.initTag(.one),
-            },
-            .void_value = .{
-                .ty = Type.initTag(.void),
-                .val = Value.initTag(.void_value),
-            },
-            .unreachable_value = .{
-                .ty = Type.initTag(.noreturn),
-                .val = Value.initTag(.unreachable_value),
-            },
-            .null_value = .{
-                .ty = Type.initTag(.null),
-                .val = Value.initTag(.null_value),
-            },
-            .bool_true = .{
-                .ty = Type.initTag(.bool),
-                .val = Value.initTag(.bool_true),
-            },
-            .bool_false = .{
-                .ty = Type.initTag(.bool),
-                .val = Value.initTag(.bool_false),
-            },
-            .empty_struct = .{
-                .ty = Type.initTag(.empty_struct_literal),
-                .val = Value.initTag(.empty_struct_value),
-            },
-            .calling_convention_c = .{
-                .ty = Type.initTag(.calling_convention),
-                .val = .{ .ptr_otherwise = &calling_convention_c_payload.base },
-            },
-            .calling_convention_inline = .{
-                .ty = Type.initTag(.calling_convention),
-                .val = .{ .ptr_otherwise = &calling_convention_inline_payload.base },
-            },
-            .generic_poison = .{
-                .ty = Type.initTag(.generic_poison),
-                .val = Value.initTag(.generic_poison),
-            },
-            .var_args_param = undefined,
-        });
-    };
-
-    /// We would like this to be const but `Value` wants a mutable pointer for
-    /// its payload field. Nothing should mutate this though.
-    var calling_convention_c_payload: Value.Payload.U32 = .{
-        .base = .{ .tag = .enum_field_index },
-        .data = @enumToInt(std.builtin.CallingConvention.C),
-    };
-
-    /// We would like this to be const but `Value` wants a mutable pointer for
-    /// its payload field. Nothing should mutate this though.
-    var calling_convention_inline_payload: Value.Payload.U32 = .{
-        .base = .{ .tag = .enum_field_index },
-        .data = @enumToInt(std.builtin.CallingConvention.Inline),
    };

    /// All instructions have an 8-byte payload, which is contained within
@@ -4163,7 +3811,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
    };
}

-const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
+const ref_start_index: u32 = InternPool.static_len;

 pub fn indexToRef(inst: Inst.Index) Inst.Ref {
     return @intToEnum(Inst.Ref, ref_start_index + inst);
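The two hunks above are the crux of the Zir change: the first `InternPool.static_len` values of `Inst.Ref` now name statically interned types and values, and any ref at or past `ref_start_index` is a runtime instruction, which is what makes the big `typed_value_map` table dead weight. A minimal sketch of the inverse mapping, consistent with `indexToRef` above and with the `Air.refToIndex` call sites later in this patch (the exact body in the tree may differ):

    pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
        const ref_int = @enumToInt(inst);
        if (ref_int >= ref_start_index) {
            // At or past ref_start_index: a real instruction index.
            return ref_int - ref_start_index;
        } else {
            // Below it: a statically interned value, not an instruction.
            return null;
        }
    }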
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 971ed4749d05..4370977272a0 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -471,6 +471,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }
 
 fn gen(self: *Self) !void {
+    const mod = self.bin_file.options.module.?;
     const cc = self.fn_type.fnCallingConvention();
     if (cc != .Naked) {
         // stp fp, lr, [sp, #-16]!
@@ -522,8 +523,8 @@ fn gen(self: *Self) !void {
 
                 const ty = self.air.typeOfIndex(inst);
 
-                const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-                const abi_align = ty.abiAlignment(self.target.*);
+                const abi_size = @intCast(u32, ty.abiSize(mod));
+                const abi_align = ty.abiAlignment(mod);
                 const stack_offset = try self.allocMem(abi_size, abi_align, inst);
 
                 try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -951,8 +952,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
        tomb_bits >>= 1;
        if (!dies) continue;
        const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
        self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1026,31 +1027,31 @@ fn allocMem(
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = self.air.typeOfIndex(inst).elemType();
 
-    if (!elem_ty.hasRuntimeBits()) {
+    if (!elem_ty.hasRuntimeBits(mod)) {
        // return the stack offset 0. Stack offset 0 will be where all
        // zero-sized stack allocations live as non-zero-sized
        // allocations will always have an offset > 0.
        return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     return self.allocMem(abi_size, abi_align, inst);
 }
 
 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -1177,13 +1178,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
+    const mod = self.bin_file.options.module.?;
     const operand = ty_op.operand;
     const operand_mcv = try self.resolveInst(operand);
     const operand_ty = self.air.typeOf(operand);
-    const operand_info = operand_ty.intInfo(self.target.*);
+    const operand_info = operand_ty.intInfo(mod);
 
     const dest_ty = self.air.typeOfIndex(inst);
-    const dest_info = dest_ty.intInfo(self.target.*);
+    const dest_info = dest_ty.intInfo(mod);
 
     const result: MCValue = result: {
         const operand_lock: ?RegisterLock = switch (operand_mcv) {
@@ -1257,8 +1259,9 @@ fn trunc(
     operand_ty: Type,
     dest_ty: Type,
 ) !MCValue {
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     if (info_b.bits <= 64) {
         const operand_reg = switch (operand) {
@@ -1319,6 +1322,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
@@ -1327,7 +1331,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             .unreach => unreachable,
             .compare_flags => |cond| break :result MCValue{ .compare_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Bool => {
                         // TODO convert this to mvn + and
                         const op_reg = switch (operand) {
@@ -1361,7 +1365,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                     .Int => {
-                        const int_info = operand_ty.intInfo(self.target.*);
+                        const int_info = operand_ty.intInfo(mod);
                         if (int_info.bits <= 64) {
                             const op_reg = switch (operand) {
                                 .register => |r| r,
@@ -1413,13 +1417,13 @@ fn minMax(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO ARM min/max on floats", .{}),
        .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
        .Int => {
-            const mod = self.bin_file.options.module.?;
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                var lhs_reg: Register = undefined;
                var rhs_reg: Register = undefined;
@@ -1907,12 +1911,12 @@ fn addSub(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO binary operations on floats", .{}),
        .Vector => return self.fail("TODO binary operations on vectors", .{}),
        .Int => {
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                const lhs_immediate = try lhs_bind.resolveToImmediate(self);
                const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -1968,11 +1972,11 @@ fn mul(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Vector => return self.fail("TODO binary operations on vectors", .{}),
        .Int => {
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                // TODO add optimisations for multiplication
                // with immediates, for example a * 2 can be
@@ -1999,7 +2003,8 @@ fn divFloat(
     _ = rhs_ty;
     _ = maybe_inst;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO div_float", .{}),
        .Vector => return self.fail("TODO div_float on vectors", .{}),
        else => unreachable,
@@ -2015,12 +2020,12 @@ fn divTrunc(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO div on floats", .{}),
        .Vector => return self.fail("TODO div on vectors", .{}),
        .Int => {
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                switch (int_info.signedness) {
                    .signed => {
@@ -2049,12 +2054,12 @@ fn divFloor(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO div on floats", .{}),
        .Vector => return self.fail("TODO div on vectors", .{}),
        .Int => {
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                switch (int_info.signedness) {
                    .signed => {
@@ -2082,12 +2087,12 @@ fn divExact(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO div on floats", .{}),
        .Vector => return self.fail("TODO div on vectors", .{}),
        .Int => {
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                switch (int_info.signedness) {
                    .signed => {
@@ -2118,12 +2123,12 @@ fn rem(
     _ = maybe_inst;
 
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO rem/mod on floats", .{}),
        .Vector => return self.fail("TODO rem/mod on vectors", .{}),
        .Int => {
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                var lhs_reg: Register = undefined;
                var rhs_reg: Register = undefined;
@@ -2188,7 +2193,8 @@ fn modulo(
     _ = rhs_ty;
     _ = maybe_inst;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO mod on floats", .{}),
        .Vector => return self.fail("TODO mod on vectors", .{}),
        .Int => return self.fail("TODO mod on ints", .{}),
@@ -2205,10 +2211,11 @@ fn wrappingArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Vector => return self.fail("TODO binary operations on vectors", .{}),
        .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                // Generate an add/sub/mul
                const result: MCValue = switch (tag) {
@@ -2240,11 +2247,11 @@ fn bitwise(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Vector => return self.fail("TODO binary operations on vectors", .{}),
        .Int => {
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                // TODO implement bitwise operations with immediates
                const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2274,10 +2281,11 @@ fn shiftExact(
 ) InnerError!MCValue {
     _ = rhs_ty;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Vector => return self.fail("TODO binary operations on vectors", .{}),
        .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                const rhs_immediate = try rhs_bind.resolveToImmediate(self);
 
@@ -2323,10 +2331,11 @@ fn shiftNormal(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Vector => return self.fail("TODO binary operations on vectors", .{}),
        .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 64) {
                // Generate a shl_exact/shr_exact
                const result: MCValue = switch (tag) {
@@ -2362,7 +2371,8 @@ fn booleanOp(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Bool => {
            assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
            assert((try rhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
@@ -2388,9 +2398,9 @@ fn ptrArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Pointer => {
-            const mod = self.bin_file.options.module.?;
            assert(rhs_ty.eql(Type.usize, mod));
 
            const ptr_ty = lhs_ty;
@@ -2398,7 +2408,7 @@ fn ptrArithmetic(
                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
                else => ptr_ty.childType(),
            };
-            const elem_size = elem_ty.abiSize(self.target.*);
+            const elem_size = elem_ty.abiSize(mod);
 
            const base_tag: Air.Inst.Tag = switch (tag) {
                .ptr_add => .add,
@@ -2511,6 +2521,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
        const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2518,16 +2529,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
        const rhs_ty = self.air.typeOf(extra.rhs);
 
        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
            .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
            .Int => {
-                const mod = self.bin_file.options.module.?;
                assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                switch (int_info.bits) {
                    1...31, 33...63 => {
                        const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2639,24 +2649,23 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
 
     const result: MCValue = result: {
-        const mod = self.bin_file.options.module.?;
-
        const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
        const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
        const lhs_ty = self.air.typeOf(extra.lhs);
        const rhs_ty = self.air.typeOf(extra.rhs);
 
        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
            .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
            .Int => {
                assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                if (int_info.bits <= 32) {
                    const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
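Every hunk in this backend follows the same recipe: fetch the `Module` once per function via `self.bin_file.options.module.?` and hand it to the `Type` queries that previously took `self.target.*`, because ABI size, alignment, and type tags are now answered through the InternPool. A hypothetical helper, not part of the patch, showing the shape of the new call sites:

    // Sketch only; `Self` and `Type` are the ones used in this file.
    // Reports whether a value of type `ty` fits in one general-purpose register.
    fn fitsInGpRegister(self: *Self, ty: Type) bool {
        const mod = self.bin_file.options.module.?; // the Module owns the InternPool
        return switch (ty.zigTypeTag(mod)) {
            .Int => ty.intInfo(mod).bits <= 64,
            .Bool, .Pointer => true,
            else => false,
        };
    }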
@@ -2864,6 +2873,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
 
     const result: MCValue = result: {
        const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
        const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2871,14 +2881,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
        const rhs_ty = self.air.typeOf(extra.rhs);
 
        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
            .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
            .Int => {
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                if (int_info.bits <= 64) {
                    const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -3011,10 +3021,11 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
+    const mod = self.bin_file.options.module.?;
     var opt_buf: Type.Payload.ElemType = undefined;
     const payload_ty = optional_ty.optionalChild(&opt_buf);
-    if (!payload_ty.hasRuntimeBits()) return MCValue.none;
-    if (optional_ty.isPtrLikeOptional()) {
+    if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
+    if (optional_ty.isPtrLikeOptional(mod)) {
        // TODO should we reuse the operand here?
        const raw_reg = try self.register_manager.allocReg(inst, gp);
        const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3055,16 +3066,17 @@ fn errUnionErr(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
        return MCValue{ .immediate = 0 };
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
        return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
        .register => {
            var operand_reg: Register = undefined;
@@ -3086,7 +3098,7 @@ fn errUnionErr(
            );
 
            const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
 
            _ = try self.addInst(.{
                .tag = .ubfx, // errors are unsigned integers
@@ -3134,16 +3146,17 @@ fn errUnionPayload(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
        return try error_union_bind.resolveToMcv(self);
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
        return MCValue.none;
     }
 
-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
        .register => {
            var operand_reg: Register = undefined;
@@ -3165,10 +3178,10 @@ fn errUnionPayload(
            );
 
            const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+            const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
 
            _ = try self.addInst(.{
-                .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+                .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
                .data = .{
                    .rr_lsb_width = .{
                        // Set both registers to the X variant to get the full width
@@ -3245,6 +3258,7 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     if (self.liveness.isUnused(inst)) {
@@ -3253,7 +3267,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 
     const result: MCValue = result: {
        const payload_ty = self.air.typeOf(ty_op.operand);
-        if (!payload_ty.hasRuntimeBits()) {
+        if (!payload_ty.hasRuntimeBits(mod)) {
            break :result MCValue{ .immediate = 1 };
        }
 
@@ -3265,7 +3279,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
        };
        defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-        if (optional_ty.isPtrLikeOptional()) {
+        if (optional_ty.isPtrLikeOptional(mod)) {
            // TODO should we check if we can reuse the operand?
            const raw_reg = try self.register_manager.allocReg(inst, gp);
            const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3273,9 +3287,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
            break :result MCValue{ .register = reg };
        }
 
-        const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
-        const optional_abi_align = optional_ty.abiAlignment(self.target.*);
-        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod));
+        const optional_abi_align = optional_ty.abiAlignment(mod);
+        const offset = @intCast(u32, payload_ty.abiSize(mod));
 
        const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
        try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3289,19 +3303,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 
 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const error_union_ty = self.air.getRefType(ty_op.ty);
        const error_ty = error_union_ty.errorUnionSet();
        const payload_ty = error_union_ty.errorUnionPayload();
        const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
        const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
        try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
        try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
@@ -3314,17 +3329,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
        const error_union_ty = self.air.getRefType(ty_op.ty);
        const error_ty = error_union_ty.errorUnionSet();
        const payload_ty = error_union_ty.errorUnionPayload();
        const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
        const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
        try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
        try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
@@ -3440,8 +3456,9 @@ fn ptrElemVal(
     ptr_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.childType();
-    const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+    const elem_size = @intCast(u32, elem_ty.abiSize(mod));
 
     // TODO optimize for elem_sizes of 1, 2, 4, 8
     switch (elem_size) {
@@ -3597,8 +3614,9 @@ fn reuseOperand(
 }
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.elemType();
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const elem_size = elem_ty.abiSize(mod);
 
     switch (ptr) {
        .none => unreachable,
@@ -3846,9 +3864,10 @@ fn genInlineMemsetCode(
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const elem_ty = self.air.typeOfIndex(inst);
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const elem_size = elem_ty.abiSize(mod);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits())
+        if (!elem_ty.hasRuntimeBits(mod))
            break :result MCValue.none;
 
        const ptr = try self.resolveInst(ty_op.operand);
@@ -3874,11 +3893,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
-        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
-        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
+        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
+        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
        4 => .ldr_immediate,
        8 => .ldr_immediate,
        3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}),
@@ -3896,7 +3916,8 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 }
 
 fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
        1 => .strb_immediate,
@@ -3917,8 +3938,9 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 }
 
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     log.debug("store: storing {} to {}", .{ value, ptr });
-    const abi_size = value_ty.abiSize(self.target.*);
+    const abi_size = value_ty.abiSize(mod);
 
     switch (ptr) {
        .none => unreachable,
@@ -4069,10 +4091,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 
 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
        const mcv = try self.resolveInst(operand);
        const ptr_ty = self.air.typeOf(operand);
        const struct_ty = ptr_ty.childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
        switch (mcv) {
            .ptr_stack_offset => |off| {
                break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4093,10 +4116,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const operand = extra.struct_operand;
     const index = extra.field_index;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
        const mcv = try self.resolveInst(operand);
        const struct_ty = self.air.typeOf(operand);
        const struct_field_ty = struct_ty.structFieldType(index);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 
        switch (mcv) {
            .dead, .unreach => unreachable,
@@ -4142,12 +4166,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const field_ptr = try self.resolveInst(extra.field_ptr);
        const struct_ty = self.air.getRefType(ty_pl.ty).childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
        switch (field_ptr) {
            .ptr_stack_offset => |off| {
                break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
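The error-union hunks above all address `E!T` through two offsets from a single stack allocation; only the source of the layout facts changed from `self.target.*` to `mod`. Condensed from `airWrapErrUnionPayload` above, under the same assumptions as the hunks (`self`, `mod`, `inst`, and the offset helpers are the ones already in scope there):

    // One slot sized for the whole error union, with the payload and the
    // error code each placed at its ABI offset within that slot.
    const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
    const abi_align = error_union_ty.abiAlignment(mod);
    const stack_offset = try self.allocMem(abi_size, abi_align, inst);
    const payload_off = errUnionPayloadOffset(payload_ty, mod);
    const err_off = errUnionErrorOffset(payload_ty, mod);
    // Stack offsets count down from the frame base, hence the subtraction.
    try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
    try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });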
@@ -4223,8 +4248,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
     const ty = self.air.typeOf(callee);
+    const mod = self.bin_file.options.module.?;
 
-    const fn_ty = switch (ty.zigTypeTag()) {
+    const fn_ty = switch (ty.zigTypeTag(mod)) {
        .Fn => ty,
        .Pointer => ty.childType(),
        else => unreachable,
@@ -4246,8 +4272,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     if (info.return_value == .stack_offset) {
        log.debug("airCall: return by reference", .{});
        const ret_ty = fn_ty.fnReturnType();
-        const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+        const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
+        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
        const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
        const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -4289,8 +4315,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
     // Due to incremental compilation, how function calls are generated depends
     // on linking.
-    const mod = self.bin_file.options.module.?;
-    if (self.air.value(callee)) |func_value| {
+    if (self.air.value(callee, mod)) |func_value| {
        if (func_value.castTag(.function)) |func_payload| {
            const func = func_payload.data;
 
@@ -4369,7 +4394,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
            return self.fail("TODO implement calling bitcasted functions", .{});
        }
     } else {
-        assert(ty.zigTypeTag() == .Pointer);
+        assert(ty.zigTypeTag(mod) == .Pointer);
        const mcv = try self.resolveInst(callee);
 
        try self.genSetReg(ty, .x30, mcv);
@@ -4410,11 +4435,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const operand = try self.resolveInst(un_op);
     const ret_ty = self.fn_type.fnReturnType();
+    const mod = self.bin_file.options.module.?;
 
     switch (self.ret_mcv) {
        .none => {},
        .immediate => {
-            assert(ret_ty.isError());
+            assert(ret_ty.isError(mod));
        },
        .register => |reg| {
            // Return result by value
@@ -4465,8 +4491,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
            // location.
            const op_inst = Air.refToIndex(un_op).?;
            if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-                const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-                const abi_align = ret_ty.abiAlignment(self.target.*);
+                const mod = self.bin_file.options.module.?;
+                const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+                const abi_align = ret_ty.abiAlignment(mod);
 
                const offset = try self.allocMem(abi_size, abi_align, null);
 
@@ -4501,21 +4528,21 @@ fn cmp(
     lhs_ty: Type,
     op: math.CompareOperator,
 ) !MCValue {
-    var int_buffer: Type.Payload.Bits = undefined;
-    const int_ty = switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
        .Optional => blk: {
            var opt_buffer: Type.Payload.ElemType = undefined;
            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                break :blk Type.initTag(.u1);
-            } else if (lhs_ty.isPtrLikeOptional()) {
+            } else if (lhs_ty.isPtrLikeOptional(mod)) {
                break :blk Type.usize;
            } else {
                return self.fail("TODO ARM cmp non-pointer optionals", .{});
            }
        },
        .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(&int_buffer),
+        .Enum => lhs_ty.intTagType(),
        .Int => lhs_ty,
        .Bool => Type.initTag(.u1),
        .Pointer => Type.usize,
@@ -4523,7 +4550,7 @@ fn cmp(
        else => unreachable,
     };
 
-    const int_info = int_ty.intInfo(self.target.*);
+    const int_info = int_ty.intInfo(mod);
     if (int_info.bits <= 64) {
        try self.spillCompareFlagsIfOccupied();
 
@@ -4687,8 +4714,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     // whether it needs to be spilled in the branches
     if (self.liveness.operandDies(inst, 0)) {
        const op_int = @enumToInt(pl_op.operand);
-        if (op_int >= Air.Inst.Ref.typed_value_map.len) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int >= Air.ref_start_index) {
+            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
            self.processDeath(op_index);
        }
     }
@@ -4819,13 +4846,14 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
-    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional()) blk: {
+    const mod = self.bin_file.options.module.?;
+    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
        var buf: Type.Payload.ElemType = undefined;
        const payload_ty = operand_ty.optionalChild(&buf);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
            break :blk .{ .ty = operand_ty, .bind = operand_bind };
 
-        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const offset = @intCast(u32, payload_ty.abiSize(mod));
        const operand_mcv = try operand_bind.resolveToMcv(self);
        const new_mcv: MCValue = switch (operand_mcv) {
            .register => |source_reg| new: {
@@ -4838,7 +4866,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
                    try self.genSetReg(payload_ty, dest_reg, operand_mcv);
                } else {
                    _ = try self.addInst(.{
-                        .tag = if (payload_ty.isSignedInt())
+                        .tag = if (payload_ty.isSignedInt(mod))
                            Mir.Inst.Tag.asr_immediate
                        else
                            Mir.Inst.Tag.lsr_immediate,
@@ -5210,9 +5238,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
+    const mod = self.bin_file.options.module.?;
     const block_data = self.blocks.getPtr(block).?;
 
-    if (self.air.typeOf(operand).hasRuntimeBits()) {
+    if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
        const operand_mcv = try self.resolveInst(operand);
        const block_mcv = block_data.mcv;
        if (block_mcv == .none) {
@@ -5386,7 +5415,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 }
 
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
        .dead => unreachable,
        .unreach, .none => return, // Nothing to do.
@@ -5445,7 +5475,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                    try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
                    const overflow_bit_ty = ty.structFieldType(1);
-                    const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+                    const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
                    const raw_cond_reg = try self.register_manager.allocReg(null, gp);
                    const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
@@ -5559,6 +5589,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 }
 
 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     switch (mcv) {
        .dead => unreachable,
        .unreach, .none => return, // Nothing to do.
@@ -5669,13 +5700,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
            try self.genLdrRegister(reg, reg.toX(), ty);
        },
        .stack_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);
 
            switch (abi_size) {
                1, 2, 4, 8 => {
                    const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
-                        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
+                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
+                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
                        4, 8 => .ldr_stack,
                        else => unreachable, // unexpected abi size
                    };
@@ -5693,13 +5724,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
            }
        },
        .stack_argument_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);
 
            switch (abi_size) {
                1, 2, 4, 8 => {
                    const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
-                        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
                        4, 8 => .ldr_stack_argument,
                        else => unreachable, // unexpected abi size
                    };
@@ -5720,7 +5751,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 }
 
 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
        .dead => unreachable,
        .none, .unreach => return,
@@ -5728,7 +5760,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
            if (!self.wantSafety())
                return; // The already existing value will do just fine.
            // TODO Upgrade this to a memset call when we have that available.
-            switch (ty.abiSize(self.target.*)) {
+            switch (ty.abiSize(mod)) {
                1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -6087,14 +6119,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airTry(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const extra = self.air.extraData(Air.Try, pl_op.payload);
     const body = self.air.extra[extra.end..][0..extra.data.body_len];
     const result: MCValue = result: {
        const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
        const error_union_ty = self.air.typeOf(pl_op.operand);
-        const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const error_union_align = error_union_ty.abiAlignment(self.target.*);
+        const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const error_union_align = error_union_ty.abiAlignment(mod);
 
        // The error union will die in the body. However, we need the
        // error union after the body in order to extract the payload
@@ -6123,22 +6156,18 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
-    // First section of indexes correspond to a set number of constant values.
-    const ref_int = @enumToInt(inst);
-    if (ref_int < Air.Inst.Ref.typed_value_map.len) {
-        const tv = Air.Inst.Ref.typed_value_map[ref_int];
-        if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
-            return MCValue{ .none = {} };
-        }
-        return self.genTypedValue(tv);
-    }
+    const mod = self.bin_file.options.module.?;
 
     // If the type has no codegen bits, no need to store it.
     const inst_ty = self.air.typeOf(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
        return MCValue{ .none = {} };
 
-    const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+    const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
+        .ty = inst_ty,
+        .val = self.air.value(inst, mod).?,
+    });
+
     switch (self.air.instructions.items(.tag)[inst_index]) {
        .constant => {
            // Constants have static lifetimes, so they are always memoized in the outer most table.
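With `typed_value_map` gone, `resolveInst` no longer carves up the raw integer range by hand; the constant/instruction split is expressed through `Air.refToIndex`, and interned constants are materialized from `air.value`. The pivotal lines, isolated for emphasis (same assumptions as the hunk above; the runtime-instruction path is elided):

    const inst_index = Air.refToIndex(inst) orelse {
        // Not a runtime instruction, so it must be an interned constant,
        // which always carries a comptime-known value to materialize.
        return self.genTypedValue(.{
            .ty = self.air.typeOf(inst),
            .val = self.air.value(inst, mod).?,
        });
    };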
@@ -6222,6 +6251,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     errdefer self.gpa.free(result.args);
 
     const ret_ty = fn_ty.fnReturnType();
+    const mod = self.bin_file.options.module.?;
 
     switch (cc) {
        .Naked => {
@@ -6236,14 +6266,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
            var ncrn: usize = 0; // Next Core Register Number
            var nsaa: u32 = 0; // Next stacked argument address
 
-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                result.return_value = .{ .none = {} };
            } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                    result.return_value = .{ .immediate = 0 };
                } else if (ret_ty_size <= 8) {
                    result.return_value = .{ .register = self.registerAlias(c_abi_int_return_regs[0], ret_ty) };
@@ -6253,7 +6283,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
            }
 
            for (param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.abiSize(self.target.*));
+                const param_size = @intCast(u32, ty.abiSize(mod));
                if (param_size == 0) {
                    result.args[i] = .{ .none = {} };
                    continue;
@@ -6261,7 +6291,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
                // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
                // values to spread across odd-numbered registers.
-                if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) {
+                if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) {
                    // Round up NCRN to the next even number
                    ncrn += ncrn % 2;
                }
@@ -6279,7 +6309,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                    ncrn = 8;
                    // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
                    // that the entire stack space consumed by the arguments is 8-byte aligned.
-                    if (ty.abiAlignment(self.target.*) == 8) {
+                    if (ty.abiAlignment(mod) == 8) {
                        if (nsaa % 8 != 0) {
                            nsaa += 8 - (nsaa % 8);
                        }
@@ -6294,14 +6324,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
            result.stack_align = 16;
        },
        .Unspecified => {
-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                result.return_value = .{ .none = {} };
            } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                    result.return_value = .{ .immediate = 0 };
                } else if (ret_ty_size <= 8) {
                    result.return_value = .{ .register = self.registerAlias(.x0, ret_ty) };
@@ -6318,9 +6348,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
            var stack_offset: u32 = 0;
 
            for (param_types, 0..) |ty, i| {
-                if (ty.abiSize(self.target.*) > 0) {
-                    const param_size = @intCast(u32, ty.abiSize(self.target.*));
-                    const param_alignment = ty.abiAlignment(self.target.*);
+                if (ty.abiSize(mod) > 0) {
+                    const param_size = @intCast(u32, ty.abiSize(mod));
+                    const param_alignment = ty.abiAlignment(mod);
 
                    stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
                    result.args[i] = .{ .stack_argument_offset = stack_offset };
@@ -6371,7 +6401,8 @@ fn parseRegName(name: []const u8) ?Register {
 }
 
 fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     switch (reg.class()) {
        .general_purpose => {
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 0c48f33ea1a1..cbfd6a11717c 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -4,6 +4,7 @@ const bits = @import("bits.zig");
 const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
 
 pub const Class = union(enum) {
     memory,
@@ -14,40 +15,40 @@ pub const Class = union(enum) {
 };
 
 /// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, target: std.Target) Class {
-    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+pub fn classifyType(ty: Type, mod: *const Module) Class {
+    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
 
     var maybe_float_bits: ?u16 = null;
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
        .Struct => {
            if (ty.containerLayout() == .Packed) return .byval;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
            if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
            if (bit_size > 128) return .memory;
            if (bit_size > 64) return .double_integer;
            return .integer;
        },
        .Union => {
            if (ty.containerLayout() == .Packed) return .byval;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
            if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
            if (bit_size > 128) return .memory;
            if (bit_size > 64) return .double_integer;
            return .integer;
        },
        .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
        .Vector => {
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
            // TODO is this controlled by a cpu feature?
            if (bit_size > 128) return .memory;
            return .byval;
        },
        .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional());
+            std.debug.assert(ty.isPtrLikeOptional(mod));
            return .byval;
        },
        .Pointer => {
@@ -73,14 +74,15 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
 }
 
 const sret_float_count = 4;
-fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
+fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 {
+    const target = mod.getTarget();
     const invalid = std.math.maxInt(u8);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
        .Union => {
            const fields = ty.unionFields();
            var max_count: u8 = 0;
            for (fields.values()) |field| {
-                const field_count = countFloats(field.ty, target, maybe_float_bits);
+                const field_count = countFloats(field.ty, mod, maybe_float_bits);
                if (field_count == invalid) return invalid;
                if (field_count > max_count) max_count = field_count;
                if (max_count > sret_float_count) return invalid;
@@ -93,7 +95,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
            var i: u32 = 0;
            while (i < fields_len) : (i += 1) {
                const field_ty = ty.structFieldType(i);
-                const field_count = countFloats(field_ty, target, maybe_float_bits);
+                const field_count = countFloats(field_ty, mod, maybe_float_bits);
                if (field_count == invalid) return invalid;
                count += field_count;
                if (count > sret_float_count) return invalid;
@@ -113,12 +115,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
     }
 }
 
-pub fn getFloatArrayType(ty: Type) ?Type {
-    switch (ty.zigTypeTag()) {
+pub fn getFloatArrayType(ty: Type, mod: *const Module) ?Type {
+    switch (ty.zigTypeTag(mod)) {
        .Union => {
            const fields = ty.unionFields();
            for (fields.values()) |field| {
-                if (getFloatArrayType(field.ty)) |some| return some;
+                if (getFloatArrayType(field.ty, mod)) |some| return some;
            }
            return null;
        },
@@ -127,7 +129,7 @@ pub fn getFloatArrayType(ty: Type) ?Type {
            var i: u32 = 0;
            while (i < fields_len) : (i += 1) {
                const field_ty = ty.structFieldType(i);
-                if (getFloatArrayType(field_ty)) |some| return some;
+                if (getFloatArrayType(field_ty, mod)) |some| return some;
            }
            return null;
        },
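For context, `classifyType` is what the backends consult when lowering C-ABI parameters and returns; the patch only reroutes its layout queries through the `Module`. A hypothetical call site, for illustration only (the `abi` import name and `lowerArg` are assumptions, not code from this commit):

    const abi = @import("abi.zig");

    // Decide how one argument of type `ty` travels under the C calling
    // convention on aarch64; every layout query now goes through the Module.
    fn lowerArg(mod: *const Module, ty: Type) void {
        switch (abi.classifyType(ty, mod)) {
            .memory => {}, // passed indirectly, via pointer
            .byval => {}, // passed directly in a single register
            .integer => {}, // one general-purpose register
            .double_integer => {}, // a pair of general-purpose registers
            .float_array => |count| { // homogeneous aggregate of `count` floats
                _ = count;
            },
        }
    }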
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index bdc1627bd6a0..4c7151cd470a 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -520,8 +520,9 @@ fn gen(self: *Self) !void {
 
                const ty = self.air.typeOfIndex(inst);
 
-                const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-                const abi_align = ty.abiAlignment(self.target.*);
+                const mod = self.bin_file.options.module.?;
+                const abi_size = @intCast(u32, ty.abiSize(mod));
+                const abi_align = ty.abiAlignment(mod);
                const stack_offset = try self.allocMem(abi_size, abi_align, inst);
                try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -937,8 +938,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
        tomb_bits >>= 1;
        if (!dies) continue;
        const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
        self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1006,9 +1007,10 @@ fn allocMem(
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = self.air.typeOfIndex(inst).elemType();
 
-    if (!elem_ty.hasRuntimeBits()) {
+    if (!elem_ty.hasRuntimeBits(mod)) {
        // As this stack item will never be dereferenced at runtime,
        // return the stack offset 0. Stack offset 0 will be where all
        // zero-sized stack allocations live as non-zero-sized
@@ -1016,22 +1018,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
        return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     return self.allocMem(abi_size, abi_align, inst);
 }
 
 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
 
     if (reg_ok) {
        // Make sure the type can fit in a register before we try to allocate one.
@@ -1158,10 +1159,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     const operand_ty = self.air.typeOf(ty_op.operand);
     const dest_ty = self.air.typeOfIndex(inst);
 
-    const operand_abi_size = operand_ty.abiSize(self.target.*);
-    const dest_abi_size = dest_ty.abiSize(self.target.*);
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const operand_abi_size = operand_ty.abiSize(mod);
+    const dest_abi_size = dest_ty.abiSize(mod);
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     const dst_mcv: MCValue = blk: {
        if (info_a.bits == info_b.bits) {
@@ -1215,8 +1217,9 @@ fn trunc(
     operand_ty: Type,
     dest_ty: Type,
 ) !MCValue {
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     if (info_b.bits <= 32) {
        if (info_a.bits > 32) {
@@ -1278,6 +1281,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
        const operand_ty = self.air.typeOf(ty_op.operand);
@@ -1286,7 +1290,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
            .unreach => unreachable,
            .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
            else => {
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                    .Bool => {
                        var op_reg: Register = undefined;
                        var dest_reg: Register = undefined;
@@ -1319,7 +1323,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                    },
                    .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                    .Int => {
-                        const int_info = operand_ty.intInfo(self.target.*);
+                        const int_info = operand_ty.intInfo(mod);
                        if (int_info.bits <= 32) {
                            var op_reg: Register = undefined;
                            var dest_reg: Register = undefined;
@@ -1373,13 +1377,13 @@ fn minMax(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
        .Float => return self.fail("TODO ARM min/max on floats", .{}),
        .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
        .Int => {
-            const mod = self.bin_file.options.module.?;
            assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
            if (int_info.bits <= 32) {
                var lhs_reg: Register = undefined;
                var rhs_reg: Register = undefined;
@@ -1582,6 +1586,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
        const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1589,16 +1594,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
        const rhs_ty = self.air.typeOf(extra.rhs);
 
        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
            .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
            .Int => {
-                const mod = self.bin_file.options.module.?;
                assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                if (int_info.bits < 32) {
                    const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -1695,6 +1699,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
        const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
        const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -1702,16 +1707,15 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
        const rhs_ty = self.air.typeOf(extra.rhs);
 
        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
            .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
            .Int => {
-                const mod = self.bin_file.options.module.?;
                assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                if (int_info.bits <= 16) {
                    const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -1859,19 +1863,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
        const lhs_ty = self.air.typeOf(extra.lhs);
        const rhs_ty = self.air.typeOf(extra.rhs);
 
        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
            .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
            .Int => {
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                if (int_info.bits <= 32) {
                    const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
 
@@ -2017,7 +2022,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
        const optional_ty = self.air.typeOfIndex(inst);
-        const abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
+        const mod = self.bin_file.options.module.?;
+        const abi_size = @intCast(u32, optional_ty.abiSize(mod));
 
        // Optional with a zero-bit payload type is just a boolean true
        if (abi_size == 1) {
@@ -2036,16 +2042,17 @@ fn errUnionErr(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
        return MCValue{ .immediate = 0 };
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
        return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
        .register => {
            var operand_reg: Register = undefined;
@@ -2067,7 +2074,7 @@ fn errUnionErr(
            );
 
            const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
 
            _ = try self.addInst(.{
                .tag = .ubfx, // errors are unsigned integers
@@ -2112,16 +2119,17 @@ fn errUnionPayload(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
        return try error_union_bind.resolveToMcv(self);
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2143,10 +2151,10 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8; + const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, @@ -2221,19 +2229,20 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); @@ -2244,19 +2253,20 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = 
errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); @@ -2361,7 +2371,8 @@ fn ptrElemVal( maybe_inst: ?Air.Inst.Index, ) !MCValue { const elem_ty = ptr_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (elem_size) { 1, 4 => { @@ -2647,7 +2658,8 @@ fn reuseOperand( fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { const elem_ty = ptr_ty.elemType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (ptr) { .none => unreachable, @@ -2722,10 +2734,11 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2734,7 +2747,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.dead; const dest_mcv: MCValue = blk: { - const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4; + const ptr_fits_dest = elem_ty.abiSize(mod) <= 4; if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
break :blk ptr; @@ -2750,7 +2763,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const elem_size = @intCast(u32, value_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, value_ty.abiSize(mod)); switch (ptr) { .none => unreachable, @@ -2869,10 +2883,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.air.typeOf(operand); const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -2892,10 +2907,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; const index = extra.field_index; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); const struct_field_ty = struct_ty.structFieldType(index); switch (mcv) { @@ -2959,10 +2975,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { ); const field_bit_offset = struct_field_offset * 8; - const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8; + const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, @@ -2981,17 +2997,18 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); const struct_ty = self.air.getRefType(ty_pl.ty).childType(); - if (struct_ty.zigTypeTag() == .Union) { + if (struct_ty.zigTypeTag(mod) == .Union) { return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); } - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -3375,12 +3392,12 @@ fn addSub( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch 
(lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3431,12 +3448,12 @@ fn mul( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // TODO add optimisations for multiplication // with immediates, for example a * 2 can be @@ -3463,7 +3480,8 @@ fn divFloat( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), else => unreachable, @@ -3479,12 +3497,12 @@ fn divTrunc( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3522,12 +3540,12 @@ fn divFloor( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3569,7 +3587,8 @@ fn divExact( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => return self.fail("TODO ARM div_exact", .{}), @@ -3586,12 +3605,12 @@ fn rem( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3654,7 +3673,8 @@ fn modulo( _ = rhs_ty; _ = maybe_inst; - switch 
(lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => return self.fail("TODO ARM mod", .{}), @@ -3671,10 +3691,11 @@ fn wrappingArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // Generate an add/sub/mul const result: MCValue = switch (tag) { @@ -3708,12 +3729,12 @@ fn bitwise( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3753,16 +3774,17 @@ fn shiftExact( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const rhs_immediate = try rhs_bind.resolveToImmediate(self); const mir_tag: Mir.Inst.Tag = switch (tag) { .shl_exact => .lsl, - .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) { + .shr_exact => switch (lhs_ty.intInfo(mod).signedness) { .signed => Mir.Inst.Tag.asr, .unsigned => Mir.Inst.Tag.lsr, }, @@ -3791,10 +3813,11 @@ fn shiftNormal( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // Generate a shl_exact/shr_exact const result: MCValue = switch (tag) { @@ -3833,7 +3856,8 @@ fn booleanOp( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3866,9 +3890,9 @@ fn ptrArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { - const mod = self.bin_file.options.module.?; assert(rhs_ty.eql(Type.usize, mod)); const ptr_ty = lhs_ty; @@ -3876,7 +3900,7 @@ fn ptrArithmetic( .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = @intCast(u32, 
elem_ty.abiSize(self.target.*)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -3903,11 +3927,12 @@ fn ptrArithmetic( } fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh, 3, 4 => .ldr, else => unreachable, }; @@ -3924,7 +3949,7 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } }; const data: Mir.Inst.Data = switch (abi_size) { - 1 => if (ty.isSignedInt()) rr_extra_offset else rr_offset, + 1 => if (ty.isSignedInt(mod)) rr_extra_offset else rr_offset, 2 => rr_extra_offset, 3, 4 => rr_offset, else => unreachable, @@ -3937,7 +3962,8 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -4197,8 +4223,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const ty = self.air.typeOf(callee); + const mod = self.bin_file.options.module.?; - const fn_ty = switch (ty.zigTypeTag()) { + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -4226,8 +4253,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*)); + const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); var ptr_ty_payload: Type.Payload.ElemType = .{ @@ -4270,7 +4297,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
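// (editorial note, not part of the patch:) `Air.value` is one of the methods
// that gained a Module parameter in this change; resolving a comptime-known
// callee can require reading the value back out of the InternPool, which is
// only reachable through the Module.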
- if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -4294,7 +4321,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(Type.initTag(.usize), .lr, mcv); @@ -4356,11 +4383,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (self.ret_mcv) { .none => {}, .immediate => { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); }, .register => |reg| { // Return result by value @@ -4411,8 +4439,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const abi_align = ret_ty.abiAlignment(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4448,21 +4477,21 @@ fn cmp( lhs_ty: Type, op: math.CompareOperator, ) !MCValue { - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO ARM cmp non-pointer optionals", .{}); } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(), .Int => lhs_ty, .Bool => Type.initTag(.u1), .Pointer => Type.usize, @@ -4470,7 +4499,7 @@ fn cmp( else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 32) { try self.spillCompareFlagsIfOccupied(); @@ -4636,8 +4665,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -4772,8 +4801,9 @@ fn isNull( operand_bind: ReadArg.Bind, operand_ty: Type, ) !MCValue { - if (operand_ty.isPtrLikeOptional()) { - assert(operand_ty.abiSize(self.target.*) == 4); + const mod = self.bin_file.options.module.?; + if (operand_ty.isPtrLikeOptional(mod)) { + assert(operand_ty.abiSize(mod) == 4); const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } }; return self.cmp(operand_bind, imm_bind, 
Type.usize, .eq); @@ -5131,9 +5161,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { } fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { + const mod = self.bin_file.options.module.?; const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5301,7 +5332,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5382,7 +5414,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // C flag: movcs reg, #1 @@ -5466,6 +5498,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5640,17 +5673,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }, .stack_offset => |off| { // TODO: maybe addressing from sp instead of fp - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh, 3, 4 => .ldr, else => unreachable, }; const extra_offset = switch (abi_size) { - 1 => ty.isSignedInt(), + 1 => ty.isSignedInt(mod), 2 => true, 3, 4 => false, else => unreachable, @@ -5691,11 +5724,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, .stack_argument_offset => |off| { - const abi_size = ty.abiSize(self.target.*); + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, 3, 4 => .ldr_stack_argument, else => unreachable, }; @@ -5712,7 +5745,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .none, 
.unreach => return, @@ -6039,8 +6073,9 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.air.typeOf(pl_op.operand); - const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const error_union_align = error_union_ty.abiAlignment(self.target.*); + const mod = self.bin_file.options.module.?; + const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the // error union after the body in order to extract the payload @@ -6069,22 +6104,18 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.value(inst, mod).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { .constant => { // Constants have static lifetimes, so they are always memoized in the outer most table. @@ -6166,6 +6197,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -6180,12 +6212,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var ncrn: usize = 0; // Next Core Register Number var nsaa: u32 = 0; // Next stacked argument address - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // TODO handle cases where multiple registers are used if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; @@ -6200,10 +6232,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } for (param_types, 0..) 
|ty, i| { - if (ty.abiAlignment(self.target.*) == 8) + if (ty.abiAlignment(mod) == 8) ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6215,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (ty.abiAlignment(self.target.*) == 8) + if (ty.abiAlignment(mod) == 8) nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; @@ -6227,14 +6259,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { result.stack_align = 8; }, .Unspecified => { - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size == 0) { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; } else if (ret_ty_size <= 4) { result.return_value = .{ .register = .r0 }; @@ -6250,9 +6282,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; for (param_types, 0..) |ty, i| { - if (ty.abiSize(self.target.*) > 0) { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); - const param_alignment = ty.abiAlignment(self.target.*); + if (ty.abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.abiSize(mod)); + const param_alignment = ty.abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 8b9ec45e24dc..ca7fff7d0887 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -1,8 +1,10 @@ const std = @import("std"); +const assert = std.debug.assert; const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = union(enum) { memory, @@ -22,28 +24,28 @@ pub const Class = union(enum) { pub const Context = enum { ret, arg }; -pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class { + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); var maybe_float_bits: ?u16 = null; const max_byval_size = 512; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, target, &maybe_float_bits); + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return 
.byval; const fields = ty.structFieldCount(); var i: u32 = 0; while (i < fields) : (i += 1) { const field_ty = ty.structFieldType(i); - const field_alignment = ty.structFieldAlign(i, target); - const field_size = field_ty.bitSize(target); + const field_alignment = ty.structFieldAlign(i, mod); + const field_size = field_ty.bitSize(mod); if (field_size > 32 or field_alignment > 32) { return Class.arrSize(bit_size, 64); } @@ -51,17 +53,17 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { return Class.arrSize(bit_size, 32); }, .Union => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, target, &maybe_float_bits); + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; for (ty.unionFields().values()) |field| { - if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) { + if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) { return Class.arrSize(bit_size, 64); } } @@ -71,28 +73,28 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { .Int => { // TODO this is incorrect for _BitInt(128) but implementing // this correctly makes implementing compiler-rt impossible. - // const bit_size = ty.bitSize(target); + // const bit_size = ty.bitSize(mod); // if (bit_size > 64) return .memory; return .byval; }, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > 64) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); // TODO is this controlled by a cpu feature? 
if (ctx == .ret and bit_size > 128) return .memory; if (bit_size > 512) return .memory; return .byval; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + assert(!ty.isSlice()); return .byval; }, .ErrorUnion, @@ -114,14 +116,15 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { } const byval_float_count = 4; -fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 { +fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u32 { + const target = mod.getTarget(); const invalid = std.math.maxInt(u32); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Union => { const fields = ty.unionFields(); var max_count: u32 = 0; for (fields.values()) |field| { - const field_count = countFloats(field.ty, target, maybe_float_bits); + const field_count = countFloats(field.ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; if (field_count > max_count) max_count = field_count; if (max_count > byval_float_count) return invalid; @@ -134,7 +137,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 { var i: u32 = 0; while (i < fields_len) : (i += 1) { const field_ty = ty.structFieldType(i); - const field_count = countFloats(field_ty, target, maybe_float_bits); + const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; if (count > byval_float_count) return invalid; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5fb07c5fdcee..75d5a87bf274 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -755,8 +755,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -805,22 +805,22 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. 
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const elem_ty = self.air.typeOfIndex(inst).elemType(); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -893,10 +893,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + const mod = self.bin_file.options.module.?; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.air.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1068,18 +1069,18 @@ fn binOp( lhs_ty: Type, rhs_ty: Type, ) InnerError!MCValue { + const mod = self.bin_file.options.module.?; switch (tag) { // Arithmetic operations on integers and floats .add, .sub, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO immediate operands return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); @@ -1093,14 +1094,14 @@ fn binOp( .ptr_add, .ptr_sub, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; const elem_ty = switch (ptr_ty.ptrSize()) { .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Air.Inst.Tag = switch (tag) { @@ -1331,10 +1332,11 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const optional_ty = self.air.typeOfIndex(inst); // Optional with a zero-bit payload type 
is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -1526,7 +1528,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + const mod = self.bin_file.options.module.?; + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -1698,6 +1701,7 @@ fn airFence(self: *Self) !void { } fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + const mod = self.bin_file.options.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{}); const pl_op = self.air.instructions.items(.data)[inst].pl_op; const fn_ty = self.air.typeOf(pl_op.operand); @@ -1736,7 +1740,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } - if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); @@ -1828,7 +1832,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const ty = self.air.typeOf(bin_op.lhs); const mod = self.bin_file.options.module.?; assert(ty.eql(self.air.typeOf(bin_op.rhs), mod)); - if (ty.zigTypeTag() == .ErrorSet) + if (ty.zigTypeTag(mod) == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); const lhs = try self.resolveInst(bin_op.lhs); @@ -2107,7 +2111,8 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -2533,22 +2538,18 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBits(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.value(inst, mod).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { .constant => { // Constants have static lifetimes, so they are always memoized in the outer most table. 
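// ---------------------------------------------------------------------------
// (editorial sketch, not part of the patch:) nearly every hunk in these
// backend files applies the same mechanical rule: type queries such as
// `abiSize`, `abiAlignment`, `intInfo`, `zigTypeTag`, and `hasRuntimeBits`
// now take the Module -- through which both the target and the InternPool
// are reachable -- instead of a raw `std.Target`, and each function fetches
// it once via `const mod = self.bin_file.options.module.?;`. A minimal
// before/after shape of that signature change, using a hypothetical `Ty`
// stand-in for the compiler's `Type`:

const std = @import("std");

const Module = struct {
    target: std.Target,

    pub fn getTarget(mod: *const Module) std.Target {
        return mod.target;
    }
};

const Ty = struct {
    bits: u16,

    // Before: callers threaded `self.target.*` into every query.
    pub fn abiSizeOld(ty: Ty, target: std.Target) u64 {
        _ = target; // only target information was available here
        return (@as(u64, ty.bits) + 7) / 8;
    }

    // After: callers pass the Module; the target is still reachable through
    // it (and, in the real compiler, so is the InternPool).
    pub fn abiSize(ty: Ty, mod: *const Module) u64 {
        _ = mod.getTarget();
        return (@as(u64, ty.bits) + 7) / 8;
    }
};

test "abiSize now goes through the Module" {
    const mod = Module{ .target = @import("builtin").target };
    const ty = Ty{ .bits = 12 };
    try std.testing.expectEqual(@as(u64, 2), ty.abiSize(&mod));
}
// ---------------------------------------------------------------------------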
@@ -2630,6 +2631,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -2650,7 +2652,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -2680,14 +2682,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), } - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size <= 8) { result.return_value = .{ .register = .a0 }; } else if (ret_ty_size <= 16) { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index bec1b49a4edf..c9e0873bcecf 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -3,16 +3,18 @@ const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = enum { memory, byval, integer, double_integer }; -pub fn classifyType(ty: Type, target: std.Target) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *const Module) Class { + const target = mod.getTarget(); + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const max_byval_size = target.ptrBitWidth() * 2; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; @@ -23,7 +25,7 @@ pub fn classifyType(ty: Type, target: std.Target) Class { return .integer; }, .Union => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; @@ -36,17 +38,17 @@ pub fn classifyType(ty: Type, target: std.Target) Class { .Bool => return .integer, .Float => return .byval, .Int, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .integer; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index b70bc0f73d21..63b604857e47 100644 --- a/src/arch/sparc64/CodeGen.zig +++ 
b/src/arch/sparc64/CodeGen.zig @@ -758,18 +758,18 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 32, 64 => { // Only say yes if the operation is @@ -1018,7 +1018,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { switch (arg) { .stack_offset => |off| { const mod = self.bin_file.options.module.?; - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; const offset = off + abi_size; @@ -1203,6 +1203,7 @@ fn airBreakpoint(self: *Self) !void { } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; // We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you. @@ -1218,14 +1219,14 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO byteswap for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits == 8) break :result operand; const abi_size = int_info.bits >> 3; - const abi_align = operand_ty.abiAlignment(self.target.*); + const abi_align = operand_ty.abiAlignment(mod); const opposite_endian_asi = switch (self.target.cpu.arch.endian()) { Endian.Big => ASI.asi_primary_little, Endian.Little => ASI.asi_primary, @@ -1294,7 +1295,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]); const ty = self.air.typeOf(callee); - const fn_ty = switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -1337,7 +1339,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -1374,7 +1376,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(ty, .o7, mcv); @@ -1422,15 +1424,15 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. - .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(), .Int => lhs_ty, .Bool => Type.initTag(.u1), .Pointer => Type.usize, @@ -1438,9 +1440,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { .Optional => blk: { var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{}); @@ -1450,7 +1452,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 64) { _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{ .lhs = bin_op.lhs, @@ -1512,8 +1514,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -1752,10 +1754,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + const mod = self.bin_file.options.module.?; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.air.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1814,9 +1817,10 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { fn airLoad(self: 
*Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2037,18 +2041,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { //const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 1...32 => { try self.spillConditionFlagsIfOccupied(); @@ -2101,6 +2105,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); @@ -2116,7 +2121,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }; }, else => { - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Bool => { const op_reg = switch (operand) { .register => |r| r, @@ -2150,7 +2155,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }, .Vector => return self.fail("TODO bitwise not for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits <= 64) { const op_reg = switch (operand) { .register => |r| r, @@ -2332,16 +2337,17 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { try self.spillConditionFlagsIfOccupied(); @@ -2449,7 +2455,8 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.air.typeOf(bin_op.lhs); const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(self.target.*); + 
const mod = self.bin_file.options.module.?; + const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -2564,9 +2571,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .dead, .unreach => unreachable, @@ -2701,7 +2709,8 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result mcv; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); }; @@ -2713,7 +2722,8 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); }; @@ -2727,7 +2737,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.getRefType(ty_op.ty); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result mcv; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); }; @@ -2747,7 +2758,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const optional_ty = self.air.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + const mod = self.bin_file.options.module.?; + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -2784,7 +2796,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const elem_ty = self.air.typeOfIndex(inst).elemType(); - if (!elem_ty.hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. 
Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized @@ -2792,22 +2805,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -2860,12 +2872,12 @@ fn binOp( .xor, .cmp_eq, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // Only say yes if the operation is // commutative, i.e. we can swap both of the @@ -2934,10 +2946,10 @@ fn binOp( const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const result_reg = result.register; try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); @@ -2951,11 +2963,11 @@ fn binOp( }, .div_trunc => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = switch (tag) { .div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), @@ -2984,14 +2996,14 @@ fn binOp( }, .ptr_add => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; const elem_ty = switch (ptr_ty.ptrSize()) { .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Mir.Inst.Tag = switch (tag) { @@ -3016,7 +3028,7 @@ fn binOp( .bool_and, .bool_or, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { assert(lhs != .immediate); // should have been handled by Sema assert(rhs != .immediate); // should have been handled by Sema @@ -3046,10 +3058,10 @@ fn binOp( const result = try 
self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // 32 and 64 bit operands doesn't need truncating if (int_info.bits == 32 or int_info.bits == 64) return result; @@ -3068,10 +3080,10 @@ fn binOp( .shl_exact, .shr_exact, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = rhs == .immediate; @@ -3393,7 +3405,8 @@ fn binOpRegister( fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3512,16 +3525,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { + const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { return error_union_mcv; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_offset => |off| { @@ -3555,8 +3569,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -3730,6 +3744,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3928,19 +3943,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
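// Concretely, the `.memory` case below lowers in two steps: materialize the
// absolute address into `reg` as an immediate, then load through `reg` with
// displacement 0. The load width is the type's ABI size, which now comes
// from the Module rather than from the target.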
try self.genSetReg(ty, reg, .{ .immediate = addr }); - try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*)); + try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod)); }, .stack_offset => |off| { const real_offset = realStackOffset(off); const simm13 = math.cast(i13, real_offset) orelse return self.fail("TODO larger stack offsets: {}", .{real_offset}); - try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*)); + try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod)); }, } } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3948,7 +3964,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro if (!self.wantSafety()) return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. - switch (ty.abiSize(self.target.*)) { + switch (ty.abiSize(mod)) { 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), @@ -3978,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // TODO handle floating point CCRs @@ -4152,13 +4168,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!error_type.hasRuntimeBits()) { + if (!error_type.hasRuntimeBits(mod)) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasRuntimeBits()) { - if (error_type.abiSize(self.target.*) <= 8) { + } else if (!payload_type.hasRuntimeBits(mod)) { + if (error_type.abiSize(mod) <= 8) { const reg_mcv: MCValue = switch (operand) { .register => operand, else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, @@ -4249,8 +4266,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void { } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { + const mod = self.bin_file.options.module.?; const elem_ty = ptr_ty.elemType(); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4321,11 +4339,11 @@ fn minMax( ) InnerError!MCValue { const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO min/max on floats", .{}), .Vector => return self.fail("TODO min/max on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO skip register setting when one of the operands // is a small (fits in i13) immediate. 
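// A note on the i13 casts recurring in this file: SPARCv9 load/store and
// arithmetic immediates are 13-bit signed (simm13), so any offset outside
// [-4096, 4095] has to go through a register instead. A minimal standalone
// sketch of the range check, mirroring the `math.cast` uses above and using
// this file's `std` import; the function name is illustrative, not part of
// this file:
fn fitsSimm13(offset: u64) bool {
    // std.math.cast returns null when the value does not fit the target type.
    return std.math.cast(i13, offset) != null;
}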
@@ -4455,6 +4473,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -4478,7 +4497,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4505,12 +4524,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) result.stack_byte_count = next_stack_offset; result.stack_align = 16; - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. if (ret_ty_size <= 8) { result.return_value = switch (role) { @@ -4528,40 +4547,37 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) return result; } -fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } +fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const mod = self.bin_file.options.module.?; + const ty = self.air.typeOf(ref); // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) - return MCValue{ .none = {} }; - - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); - switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { - // Constants have static lifetimes, so they are always memoized in the outer most table. - const branch = &self.branch_stack.items[0]; - const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); - if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; - gop.value_ptr.* = try self.genTypedValue(.{ - .ty = inst_ty, - .val = self.air.values[ty_pl.payload], - }); - } - return gop.value_ptr.*; - }, - .const_ty => unreachable, - else => return self.getResolvedInstValue(inst_index), + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + + if (Air.refToIndex(ref)) |inst| { + switch (self.air.instructions.items(.tag)[inst]) { + .constant => { + // Constants have static lifetimes, so they are always memoized in the outer most table. 
+ const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst); + if (!gop.found_existing) { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = ty, + .val = self.air.values[ty_pl.payload], + }); + } + return gop.value_ptr.*; + }, + .const_ty => unreachable, + else => return self.getResolvedInstValue(inst), + } } + + return self.genTypedValue(.{ + .ty = ty, + .val = self.air.value(ref, mod).?, + }); } fn ret(self: *Self, mcv: MCValue) !void { @@ -4666,7 +4682,8 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const abi_size = value_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = value_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4707,10 +4724,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.air.typeOf(operand); const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4748,8 +4766,9 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const info_a = operand_ty.intInfo(self.target.*); - const info_b = dest_ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const info_a = operand_ty.intInfo(mod); + const info_b = dest_ty.intInfo(mod); if (info_b.bits <= 64) { const operand_reg = switch (operand) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d4be9bf13969..b592ffcb2a00 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -788,9 +788,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref); assert(!gop.found_existing); - const val = func.air.value(ref).?; + const mod = func.bin_file.base.options.module.?; + const val = func.air.value(ref, mod).?; const ty = func.air.typeOf(ref); - if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -801,7 +802,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). 
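// Both resolveInst rewrites (the sparc64 one above and this wasm one) lean
// on the new Ref/Index split: Ref values below `Air.ref_start_index` name
// interned constants, and everything at or above it maps onto an AIR
// instruction index. A sketch of that mapping over bare integers, with an
// illustrative name; the real helper is `Air.refToIndex`:
fn refToIndexSketch(ref: u32, ref_start_index: u32) ?u32 {
    if (ref < ref_start_index) return null; // an interned value, not an instruction
    return ref - ref_start_index;
}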
- const result = if (isByRef(ty, func.target)) blk: { + const result = if (isByRef(ty, mod)) blk: { const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index); break :blk WValue{ .memory = sym_index }; } else try func.lowerConstant(val, ty); @@ -987,8 +988,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 } /// Using a given `Type`, returns the corresponding type -fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { - return switch (ty.zigTypeTag()) { +fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { + const target = mod.getTarget(); + return switch (ty.zigTypeTag(mod)) { .Float => blk: { const bits = ty.floatBits(target); if (bits == 16) return wasm.Valtype.i32; // stored/loaded as u16 @@ -998,7 +1000,7 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { return wasm.Valtype.i32; // represented as pointer to stack }, .Int, .Enum => blk: { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.bits <= 32) break :blk wasm.Valtype.i32; if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64; break :blk wasm.Valtype.i32; // represented as pointer to stack @@ -1006,22 +1008,18 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { .Struct => switch (ty.containerLayout()) { .Packed => { const struct_obj = ty.castTag(.@"struct").?.data; - return typeToValtype(struct_obj.backing_int_ty, target); + return typeToValtype(struct_obj.backing_int_ty, mod); }, else => wasm.Valtype.i32, }, - .Vector => switch (determineSimdStoreStrategy(ty, target)) { + .Vector => switch (determineSimdStoreStrategy(ty, mod)) { .direct => wasm.Valtype.v128, .unrolled => wasm.Valtype.i32, }, .Union => switch (ty.containerLayout()) { .Packed => { - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.bitSize(target)), - }; - const int_ty = Type.initPayload(&int_ty_payload.base); - return typeToValtype(int_ty, target); + const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory"); + return typeToValtype(int_ty, mod); }, else => wasm.Valtype.i32, }, @@ -1030,17 +1028,17 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { } /// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, target: std.Target) u8 { - return wasm.valtype(typeToValtype(ty, target)); +fn genValtype(ty: Type, mod: *Module) u8 { + return wasm.valtype(typeToValtype(ty, mod)); } /// Using a given `Type`, returns the corresponding wasm value type /// Differently from `genValtype` this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, target: std.Target) u8 { +fn genBlockType(ty: Type, mod: *Module) u8 { return switch (ty.tag()) { .void, .noreturn => wasm.block_empty, - else => genValtype(ty, target), + else => genValtype(ty, mod), }; } @@ -1101,7 +1099,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue { /// Creates one locals for a given `Type`. 
/// Returns a corresponding `Wvalue` with `local` as active tag fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - const valtype = typeToValtype(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const valtype = typeToValtype(ty, mod); switch (valtype) { .i32 => if (func.free_locals_i32.popOrNull()) |index| { log.debug("reusing local ({d}) of type {}", .{ index, valtype }); @@ -1132,7 +1131,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Ensures a new local will be created. This is useful when it's useful /// to use a zero-initialized local. fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - try func.locals.append(func.gpa, genValtype(ty, func.target)); + const mod = func.bin_file.base.options.module.?; + try func.locals.append(func.gpa, genValtype(ty, mod)); const initial_index = func.local_index; func.local_index += 1; return WValue{ .local = .{ .value = initial_index, .references = 1 } }; @@ -1140,48 +1140,54 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Generates a `wasm.Type` from a given function type. /// Memory is owned by the caller. -fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []const Type, return_type: Type, target: std.Target) !wasm.Type { +fn genFunctype( + gpa: Allocator, + cc: std.builtin.CallingConvention, + params: []const Type, + return_type: Type, + mod: *Module, +) !wasm.Type { var temp_params = std.ArrayList(wasm.Valtype).init(gpa); defer temp_params.deinit(); var returns = std.ArrayList(wasm.Valtype).init(gpa); defer returns.deinit(); - if (firstParamSRet(cc, return_type, target)) { + if (firstParamSRet(cc, return_type, mod)) { try temp_params.append(.i32); // memory address is always a 32-bit handle - } else if (return_type.hasRuntimeBitsIgnoreComptime()) { + } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) { if (cc == .C) { - const res_classes = abi.classifyType(return_type, target); + const res_classes = abi.classifyType(return_type, mod); assert(res_classes[0] == .direct and res_classes[1] == .none); - const scalar_type = abi.scalarType(return_type, target); - try returns.append(typeToValtype(scalar_type, target)); + const scalar_type = abi.scalarType(return_type, mod); + try returns.append(typeToValtype(scalar_type, mod)); } else { - try returns.append(typeToValtype(return_type, target)); + try returns.append(typeToValtype(return_type, mod)); } - } else if (return_type.isError()) { + } else if (return_type.isError(mod)) { try returns.append(.i32); } // param types for (params) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; switch (cc) { .C => { - const param_classes = abi.classifyType(param_type, target); + const param_classes = abi.classifyType(param_type, mod); for (param_classes) |class| { if (class == .none) continue; if (class == .direct) { - const scalar_type = abi.scalarType(param_type, target); - try temp_params.append(typeToValtype(scalar_type, target)); + const scalar_type = abi.scalarType(param_type, mod); + try temp_params.append(typeToValtype(scalar_type, mod)); } else { - try temp_params.append(typeToValtype(param_type, target)); + try temp_params.append(typeToValtype(param_type, mod)); } } }, - else => if (isByRef(param_type, target)) + else => if (isByRef(param_type, mod)) try temp_params.append(.i32) else - try temp_params.append(typeToValtype(param_type, target)), + try temp_params.append(typeToValtype(param_type, mod)), } 
} @@ -1227,7 +1233,8 @@ pub fn generate( fn genFunc(func: *CodeGen) InnerError!void { const fn_info = func.decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + const mod = func.bin_file.base.options.module.?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1254,7 +1261,7 @@ fn genFunc(func: *CodeGen) InnerError!void { if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); const last_inst_ty = func.air.typeOfIndex(inst); - if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) { + if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) { try func.addTag(.@"unreachable"); } } @@ -1335,6 +1342,7 @@ const CallWValues = struct { }; fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { + const mod = func.bin_file.base.options.module.?; const cc = fn_ty.fnCallingConvention(); const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen()); defer func.gpa.free(param_types); @@ -1351,7 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value const fn_info = fn_ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1361,7 +1369,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { for (param_types) |ty| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1371,7 +1379,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV }, .C => { for (param_types) |ty| { - const ty_classes = abi.classifyType(ty, func.target); + const ty_classes = abi.classifyType(ty, mod); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -1385,11 +1393,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target: std.Target) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *const Module) bool { switch (cc) { - .Unspecified, .Inline => return isByRef(return_type, target), + .Unspecified, .Inline => return isByRef(return_type, mod), .C => { - const ty_classes = abi.classifyType(return_type, target); + const ty_classes = abi.classifyType(return_type, mod); if (ty_classes[0] == .indirect) return true; if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true; return false; @@ -1405,16 +1413,17 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } - const ty_classes = abi.classifyType(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const ty_classes = abi.classifyType(ty, mod); assert(ty_classes[0] != .none); - switch (ty.zigTypeTag()) { 
+ switch (ty.zigTypeTag(mod)) { .Struct, .Union => { if (ty_classes[0] == .indirect) { return func.lowerToStack(value); } assert(ty_classes[0] == .direct); - const scalar_type = abi.scalarType(ty, func.target); - const abi_size = scalar_type.abiSize(func.target); + const scalar_type = abi.scalarType(ty, mod); + const abi_size = scalar_type.abiSize(mod); try func.emitWValue(value); // When the value lives in the virtual stack, we must load it onto the actual stack @@ -1422,12 +1431,12 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: const opcode = buildOpcode(.{ .op = .load, .width = @intCast(u8, abi_size), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = value.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); } }, @@ -1436,7 +1445,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } assert(ty_classes[0] == .direct and ty_classes[1] == .direct); - assert(ty.abiSize(func.target) == 16); + assert(ty.abiSize(mod) == 16); // in this case we have an integer or float that must be lowered as 2 i64's. try func.emitWValue(value); try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 }); @@ -1503,18 +1512,18 @@ fn restoreStackPointer(func: *CodeGen) !void { /// /// Asserts Type has codegenbits fn allocStack(func: *CodeGen, ty: Type) !WValue { - assert(ty.hasRuntimeBitsIgnoreComptime()); + const mod = func.bin_file.base.options.module.?; + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); if (func.initial_stack_value == .none) { try func.initializeStack(); } - const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - ty.fmt(module), ty.abiSize(func.target), + ty.fmt(mod), ty.abiSize(mod), }); }; - const abi_align = ty.abiAlignment(func.target); + const abi_align = ty.abiAlignment(mod); if (abi_align > func.stack_alignment) { func.stack_alignment = abi_align; @@ -1531,6 +1540,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// This is different from allocStack where this will use the pointer's alignment /// if it is set, to ensure the stack alignment will be set correctly. fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { + const mod = func.bin_file.base.options.module.?; const ptr_ty = func.air.typeOfIndex(inst); const pointee_ty = ptr_ty.childType(); @@ -1538,15 +1548,14 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { try func.initializeStack(); } - if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.allocStack(Type.usize); // create a value containing just the stack pointer. 
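// Note the zero-bit case above: the pointee has no runtime bits, yet a
// usize-sized slot is still allocated so the resulting pointer is a real
// address; nothing meaningful is ever loaded or stored through it.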
} - const abi_alignment = ptr_ty.ptrAlignment(func.target); - const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_alignment = ptr_ty.ptrAlignment(mod); + const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - pointee_ty.fmt(module), pointee_ty.abiSize(func.target), + pointee_ty.fmt(mod), pointee_ty.abiSize(mod), }); }; if (abi_alignment > func.stack_alignment) { @@ -1704,8 +1713,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn isByRef(ty: Type, target: std.Target) bool { - switch (ty.zigTypeTag()) { +fn isByRef(ty: Type, mod: *const Module) bool { + const target = mod.getTarget(); + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -1726,40 +1736,40 @@ fn isByRef(ty: Type, target: std.Target) bool { .Array, .Frame, - => return ty.hasRuntimeBitsIgnoreComptime(), + => return ty.hasRuntimeBitsIgnoreComptime(mod), .Union => { if (ty.castTag(.@"union")) |union_ty| { if (union_ty.data.layout == .Packed) { - return ty.abiSize(target) > 8; + return ty.abiSize(mod) > 8; } } - return ty.hasRuntimeBitsIgnoreComptime(); + return ty.hasRuntimeBitsIgnoreComptime(mod); }, .Struct => { if (ty.castTag(.@"struct")) |struct_ty| { const struct_obj = struct_ty.data; if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { - return isByRef(struct_obj.backing_int_ty, target); + return isByRef(struct_obj.backing_int_ty, mod); } } - return ty.hasRuntimeBitsIgnoreComptime(); + return ty.hasRuntimeBitsIgnoreComptime(mod); }, - .Vector => return determineSimdStoreStrategy(ty, target) == .unrolled, - .Int => return ty.intInfo(target).bits > 64, + .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled, + .Int => return ty.intInfo(mod).bits > 64, .Float => return ty.floatBits(target) > 64, .ErrorUnion => { const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } return true; }, .Optional => { - if (ty.isPtrLikeOptional()) return false; + if (ty.isPtrLikeOptional(mod)) return false; var buf: Type.Payload.ElemType = undefined; const pl_type = ty.optionalChild(&buf); - if (pl_type.zigTypeTag() == .ErrorSet) return false; - return pl_type.hasRuntimeBitsIgnoreComptime(); + if (pl_type.zigTypeTag(mod) == .ErrorSet) return false; + return pl_type.hasRuntimeBitsIgnoreComptime(mod); }, .Pointer => { // Slices act like struct and will be passed by reference @@ -1778,10 +1788,11 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow to store /// it using a instruction, rather than an unrolled version. 
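// In practice the rule above reduces to: only 128-bit vectors can use a
// native v128 store, and only when the simd128 or relaxed-simd feature is
// enabled; everything else is stored element by element. A standalone
// sketch of the decision, reusing this file's SimdStoreStrategy enum and
// with the feature query reduced to two booleans for illustration:
fn simdStrategySketch(bit_size: u64, has_simd128: bool, has_relaxed_simd: bool) SimdStoreStrategy {
    if (bit_size != 128) return .unrolled; // wasm v128 only covers exactly 128 bits
    if (has_simd128 or has_relaxed_simd) return .direct;
    return .unrolled;
}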
-fn determineSimdStoreStrategy(ty: Type, target: std.Target) SimdStoreStrategy { - std.debug.assert(ty.zigTypeTag() == .Vector); - if (ty.bitSize(target) != 128) return .unrolled; +fn determineSimdStoreStrategy(ty: Type, mod: *const Module) SimdStoreStrategy { + std.debug.assert(ty.zigTypeTag(mod) == .Vector); + if (ty.bitSize(mod) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; + const target = mod.getTarget(); const features = target.cpu.features; if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) { return .direct; @@ -2084,32 +2095,33 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(un_op); const fn_info = func.decl.ty.fnInfo(); const ret_ty = fn_info.return_type; + const mod = func.bin_file.base.options.module.?; // result must be stored in the stack and we return a pointer // to the stack instead if (func.return_value != .none) { try func.store(func.return_value, operand, ret_ty, 0); - } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { - switch (ret_ty.zigTypeTag()) { + } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + switch (ret_ty.zigTypeTag(mod)) { // Aggregate types can be lowered as a singular value .Struct, .Union => { - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); try func.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, scalar_type.abiSize(func.target) * 8), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .width = @intCast(u8, scalar_type.abiSize(mod) * 8), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = operand.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); }, else => try func.emitWValue(operand), } } else { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) { try func.addImm32(0); } else { try func.emitWValue(operand); @@ -2123,14 +2135,15 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const child_type = func.air.typeOfIndex(inst).childType(); + const mod = func.bin_file.base.options.module.?; var result = result: { - if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { break :result try func.allocStack(Type.usize); // create pointer to void } const fn_info = func.decl.ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { break :result func.return_value; } @@ -2141,16 +2154,17 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const ret_ty = func.air.typeOf(un_op).childType(); const fn_info = func.decl.ty.fnInfo(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - if (ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if 
(ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2167,26 +2181,26 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]); const ty = func.air.typeOf(pl_op.operand); - const fn_ty = switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, }; const ret_ty = fn_ty.fnReturnType(); const fn_info = fn_ty.fnInfo(); - const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target); + const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand) orelse break :blk null; - const module = func.bin_file.base.options.module.?; + const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null; if (func_val.castTag(.function)) |function| { _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); break :blk function.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = module.declPtr(extern_fn.data.owner_decl); + const ext_decl = mod.declPtr(extern_fn.data.owner_decl); const ext_info = ext_decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target); + var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod); defer func_type.deinit(func.gpa); const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); const atom = func.bin_file.getAtomPtr(atom_index); @@ -2215,7 +2229,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const arg_val = try func.resolveInst(arg); const arg_ty = func.air.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); } @@ -2226,11 +2240,11 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else { // in this case we call a function pointer // so load its value onto the stack - std.debug.assert(ty.zigTypeTag() == .Pointer); + std.debug.assert(ty.zigTypeTag(mod) == .Pointer); const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2238,7 +2252,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } const result_value = result_value: { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { break :result_value WValue{ .none = {} }; } else if (ret_ty.isNoReturn()) { try func.addTag(.@"unreachable"); @@ -2246,10 +2260,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (first_param_sret) { break :result_value sret; 
// TODO: Make this less fragile and optimize - } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) { + } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); const result = try func.allocStack(scalar_type); try func.store(result, result_local, scalar_type, 0); break :result_value result; @@ -2272,6 +2286,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -2290,17 +2305,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void } else { // at this point we have a non-natural alignment, we must // load the value, and then shift+or the rhs into the result location. - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); - if (isByRef(int_elem_ty, func.target)) { + if (isByRef(int_elem_ty, mod)) { return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } - var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1); + var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1); mask <<= @intCast(u6, ptr_info.bit_offset); mask ^= ~@as(u64, 0); const shift_val = if (ptr_info.host_size <= 4) @@ -2329,11 +2340,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { assert(!(lhs != .stack and rhs == .stack)); - const abi_size = ty.abiSize(func.target); - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const abi_size = ty.abiSize(mod); + switch (ty.zigTypeTag(mod)) { .ErrorUnion => { const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.anyerror, 0); } @@ -2341,26 +2353,26 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { return func.store(lhs, rhs, Type.usize, 0); } var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.u8, 0); } - if (pl_ty.zigTypeTag() == .ErrorSet) { + if (pl_ty.zigTypeTag(mod) == .ErrorSet) { return func.store(lhs, rhs, Type.anyerror, 0); } const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Struct, .Array, .Union => if (isByRef(ty, func.target)) { + .Struct, .Array, .Union => if (isByRef(ty, mod)) { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Vector => switch (determineSimdStoreStrategy(ty, func.target)) { + .Vector => switch 
(determineSimdStoreStrategy(ty, mod)) { .unrolled => { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2374,7 +2386,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_store), offset + lhs.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); }, @@ -2404,7 +2416,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(func.target)) }); + try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) }); }, else => if (abi_size > 8) { return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ @@ -2418,7 +2430,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // into lhs, so we calculate that and emit that instead try func.lowerToStack(rhs); - const valtype = typeToValtype(ty, func.target); + const valtype = typeToValtype(ty, mod); const opcode = buildOpcode(.{ .valtype1 = valtype, .width = @intCast(u8, abi_size * 8), @@ -2428,21 +2440,22 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // store rhs value at stack pointer's location in memory try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) }, ); } fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.air.getRefType(ty_op.ty); const ptr_ty = func.air.typeOf(ty_op.operand); const ptr_info = ptr_ty.ptrInfo().data; - if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand}); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand}); const result = result: { - if (isByRef(ty, func.target)) { + if (isByRef(ty, mod)) { const new_local = try func.allocStack(ty); try func.store(new_local, operand, ty, 0); break :result new_local; @@ -2455,11 +2468,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // at this point we have a non-natural alignment, we must // shift the value to obtain the correct bit. - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); const shift_val = if (ptr_info.host_size <= 4) WValue{ .imm32 = ptr_info.bit_offset } else if (ptr_info.host_size <= 8) @@ -2479,25 +2488,26 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Loads an operand from the linear memory section. /// NOTE: Leaves the value on the stack. 
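// The packed-pointer paths above amount to a read-modify-write over the
// host integer: airStore clears the field's bits in the host and ORs in the
// new value shifted to bit_offset, while airLoad shifts the host right by
// bit_offset before truncating. A self-contained sketch of both halves over
// u64 hosts, with hypothetical names not taken from this file:
fn packedStoreSketch(host: u64, value: u64, bit_offset: u6, bit_size: u7) u64 {
    std.debug.assert(@as(u8, bit_offset) + bit_size <= 64);
    // (1 << bit_size) - 1, computed in u65 so bit_size == 64 does not overflow.
    const low_mask = @intCast(u64, (@as(u65, 1) << bit_size) - 1);
    const field_mask = low_mask << bit_offset;
    return (host & ~field_mask) | ((value << bit_offset) & field_mask);
}
fn packedLoadSketch(host: u64, bit_offset: u6, bit_size: u7) u64 {
    std.debug.assert(@as(u8, bit_offset) + bit_size <= 64);
    const low_mask = @intCast(u64, (@as(u65, 1) << bit_size) - 1);
    return (host >> bit_offset) & low_mask;
}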
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; // load local's value from memory by its stack position try func.emitWValue(operand); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { // TODO: Add helper functions for simd opcodes const extra_index = @intCast(u32, func.mir_extra.items.len); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_load), offset + operand.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); return WValue{ .stack = {} }; } - const abi_size = @intCast(u8, ty.abiSize(func.target)); + const abi_size = @intCast(u8, ty.abiSize(mod)); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, func.target), + .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, .op = .load, .signedness = .unsigned, @@ -2505,7 +2515,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) }, ); return WValue{ .stack = {} }; @@ -2516,8 +2526,9 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const arg = func.args[arg_index]; const cc = func.decl.ty.fnInfo().cc; const arg_ty = func.air.typeOfIndex(inst); + const mod = func.bin_file.base.options.module.?; if (cc == .C) { - const arg_classes = abi.classifyType(arg_ty, func.target); + const arg_classes = abi.classifyType(arg_ty, mod); for (arg_classes) |class| { if (class != .none) { func.arg_index += 1; @@ -2527,7 +2538,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When we have an argument that's passed using more than a single parameter, // we combine them into a single stack value if (arg_classes[0] == .direct and arg_classes[1] == .direct) { - if (arg_ty.zigTypeTag() != .Int and arg_ty.zigTypeTag() != .Float) { + if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) { return func.fail( "TODO: Implement C-ABI argument for type '{}'", .{arg_ty.fmt(func.bin_file.base.options.module.?)}, @@ -2557,6 +2568,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -2570,10 +2582,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
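// Wasm shift opcodes require both operands to share a valtype, so when the
// lhs lowers to i64 but the rhs to i32, the rhs is widened with an intcast
// first; 128-bit shifts are left to compiler-rt. A sketch of the bucketing
// and the widen check, with illustrative names mirroring `toWasmBits`:
fn toWasmBitsSketch(bits: u16) ?u16 {
    if (bits <= 32) return 32;
    if (bits <= 64) return 64;
    if (bits <= 128) return 128;
    return null; // larger integers are not representable in wasm locals
}
fn rhsNeedsWiden(lhs_bits: u16, rhs_bits: u16) bool {
    const lhs_wasm = toWasmBitsSketch(lhs_bits) orelse return false;
    const rhs_wasm = toWasmBitsSketch(rhs_bits).?;
    return lhs_wasm != rhs_wasm and lhs_wasm != 128;
}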
const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2593,6 +2605,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { /// Performs a binary operation on the given `WValue`'s /// NOTE: THis leaves the value on top of the stack. fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; assert(!(lhs != .stack and rhs == .stack)); if (ty.isAnyFloat()) { @@ -2600,8 +2613,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! return func.floatOp(float_op, ty, &.{ lhs, rhs }); } - if (isByRef(ty, func.target)) { - if (ty.zigTypeTag() == .Int) { + if (isByRef(ty, mod)) { + if (ty.zigTypeTag(mod) == .Int) { return func.binOpBigInt(lhs, rhs, ty, op); } else { return func.fail( @@ -2613,8 +2626,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = typeToValtype(ty, func.target), - .signedness = if (ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(ty, mod), + .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned, }); try func.emitWValue(lhs); try func.emitWValue(rhs); @@ -2625,7 +2638,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! 
} fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - if (ty.intInfo(func.target).bits > 128) { + const mod = func.bin_file.base.options.module.?; + if (ty.intInfo(mod).bits > 128) { return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); } @@ -2763,7 +2777,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError } fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue { - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement floatOps for vectors", .{}); } @@ -2773,7 +2788,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In for (args) |operand| { try func.emitWValue(operand); } - const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) }); + const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } @@ -2827,6 +2842,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In } fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); @@ -2834,7 +2850,7 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const lhs_ty = func.air.typeOf(bin_op.lhs); const rhs_ty = func.air.typeOf(bin_op.rhs); - if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement wrapping arithmetic for vectors", .{}); } @@ -2845,10 +2861,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2877,8 +2893,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr /// Asserts `Type` is <= 128 bits. /// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack. 
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - assert(ty.abiSize(func.target) <= 16); - const bitsize = @intCast(u16, ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + assert(ty.abiSize(mod) <= 16); + const bitsize = @intCast(u16, ty.bitSize(mod)); const wasm_bits = toWasmBits(bitsize) orelse { return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize}); }; @@ -2915,6 +2932,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { } fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; switch (ptr_val.tag()) { .decl_ref_mut => { const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; @@ -2932,15 +2950,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue const field_ptr = ptr_val.castTag(.field_ptr).?.data; const parent_ty = field_ptr.container_ty; - const field_offset = switch (parent_ty.zigTypeTag()) { + const field_offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout()) { - .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target), - else => parent_ty.structFieldOffset(field_ptr.field_index, func.target), + .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod), + else => parent_ty.structFieldOffset(field_ptr.field_index, mod), }, .Union => switch (parent_ty.containerLayout()) { .Packed => 0, else => blk: { - const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target); + const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod); if (layout.payload_size == 0) break :blk 0; if (layout.payload_align > layout.tag_align) break :blk 0; @@ -2964,7 +2982,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; const index = elem_ptr.index; - const elem_offset = index * elem_ptr.elem_ty.abiSize(func.target); + const elem_offset = index * elem_ptr.elem_ty.abiSize(mod); return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); }, .opt_payload_ptr => { @@ -2976,9 +2994,9 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue } fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - const module = func.bin_file.base.options.module.?; - const decl = module.declPtr(decl_index); - module.markDeclAlive(decl); + const mod = func.bin_file.base.options.module.?; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, .data = decl.ty, @@ -2992,18 +3010,18 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) }; } - const module = func.bin_file.base.options.module.?; - const decl = module.declPtr(decl_index); - if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) { + const mod = func.bin_file.base.options.module.?; + const decl = mod.declPtr(decl_index); + if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) { return WValue{ .imm32 = 0xaaaaaaaa }; } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); const atom = 
func.bin_file.getAtom(atom_index); const target_sym_index = atom.sym_index; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try func.bin_file.addTableFunction(target_sym_index); return WValue{ .function_index = target_sym_index }; } else if (offset == 0) { @@ -3041,31 +3059,31 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const decl_index = decl_ref_mut.data.decl_index; return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); } - const target = func.target; - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Void => return WValue{ .none = {} }, .Int => { - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { 0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u6, int_info.bits), )) }, 33...64 => return WValue{ .imm64 = toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u7, int_info.bits), ) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - 33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) }, + 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, } }, - .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, + .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, .Float => switch (ty.floatBits(func.target)) { 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) }, 32 => return WValue{ .float32 = val.toFloat(f32) }, @@ -3074,7 +3092,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .Pointer => switch (val.tag()) { .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, + .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, .zero, .null_value => return WValue{ .imm32 = 0 }, else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), }, @@ -3100,8 +3118,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}), } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); + const int_tag_ty = ty.intTagType(); return func.lowerConstant(val, int_tag_ty); } }, @@ -3115,7 +3132,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { .ErrorUnion => { const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
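+            // A zero-bit payload means the error value is the entire representation.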
const is_pl = val.errorUnionIsPayload(); const err_val = if (!is_pl) val else Value.initTag(.zero); @@ -3123,12 +3140,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, - .Optional => if (ty.optionalReprIsPayload()) { + .Optional => if (ty.optionalReprIsPayload(mod)) { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); if (val.castTag(.opt_payload)) |payload| { return func.lowerConstant(payload.data, pl_ty); - } else if (val.isNull()) { + } else if (val.isNull(mod)) { return WValue{ .imm32 = 0 }; } else { return func.lowerConstant(val, pl_ty); @@ -3150,7 +3167,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { return func.lowerConstant(int_val, struct_obj.backing_int_ty); }, .Vector => { - assert(determineSimdStoreStrategy(ty, target) == .direct); + assert(determineSimdStoreStrategy(ty, mod) == .direct); var buf: [16]u8 = undefined; val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; return func.storeSimdImmd(buf); @@ -3176,9 +3193,10 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { } fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa }, - .Int, .Enum => switch (ty.intInfo(func.target).bits) { + .Int, .Enum => switch (ty.intInfo(mod).bits) { 0...32 => return WValue{ .imm32 = 0xaaaaaaaa }, 33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa }, else => unreachable, @@ -3197,7 +3215,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { .Optional => { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return func.emitUndefined(pl_ty); } return WValue{ .imm32 = 0xaaaaaaaa }; @@ -3210,7 +3228,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { assert(struct_obj.layout == .Packed); return func.emitUndefined(struct_obj.backing_int_ty); }, - else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}), + else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}), } } @@ -3218,8 +3236,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
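+/// Integers wider than 32 bits are truncated to their low 32 bits.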
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { - const target = func.target; - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Enum => { if (val.castTag(.enum_field_index)) |field_index| { switch (ty.tag()) { @@ -3239,35 +3257,35 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { else => unreachable, } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); + const int_tag_ty = ty.intTagType(); return func.valueAsI32(val, int_tag_ty); } }, - .Int => switch (ty.intInfo(func.target).signedness) { - .signed => return @truncate(i32, val.toSignedInt(target)), - .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))), + .Int => switch (ty.intInfo(mod).signedness) { + .signed => return @truncate(i32, val.toSignedInt(mod)), + .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(mod))), }, .ErrorSet => { const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function return @bitCast(i32, kv.value); }, - .Bool => return @intCast(i32, val.toSignedInt(target)), - .Pointer => return @intCast(i32, val.toSignedInt(target)), + .Bool => return @intCast(i32, val.toSignedInt(mod)), + .Pointer => return @intCast(i32, val.toSignedInt(mod)), else => unreachable, // Programmer called this function for an illegal type } } fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const block_ty = func.air.getRefType(ty_pl.ty); - const wasm_block_ty = genBlockType(block_ty, func.target); + const wasm_block_ty = genBlockType(block_ty, mod); const extra = func.air.extraData(Air.Block, ty_pl.payload); const body = func.air.extra[extra.end..][0..extra.data.body_len]; // if wasm_block_ty is non-empty, we create a register to store the temporary value const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: { - const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty; + const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty; break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten } else WValue.none; @@ -3379,16 +3397,17 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In /// NOTE: This leaves the result on top of the stack, rather than a new local. fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { assert(!(lhs != .stack and rhs == .stack)); - if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // When we hit this case, we must check the value of optionals // that are not pointers. 
This means first checking against non-null for
            // both lhs and rhs, as well as checking that the payloads of lhs and rhs match
             return func.cmpOptionals(lhs, rhs, ty, op);
         }
-    } else if (isByRef(ty, func.target)) {
+    } else if (isByRef(ty, mod)) {
         return func.cmpBigInt(lhs, rhs, ty, op);
     } else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) {
         return func.cmpFloat16(lhs, rhs, op);
@@ -3401,13 +3420,13 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO

     const signedness: std.builtin.Signedness = blk: {
         // by default we treat the operand type as unsigned (i.e. bools and enum values)
-        if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+        if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned;

         // in case of an actual integer, we emit the correct signedness
-        break :blk ty.intInfo(func.target).signedness;
+        break :blk ty.intInfo(mod).signedness;
     };
     const opcode: wasm.Opcode = buildOpcode(.{
-        .valtype1 = typeToValtype(ty, func.target),
+        .valtype1 = typeToValtype(ty, mod),
         .op = switch (op) {
             .lt => .lt,
             .lte => .le,
@@ -3464,11 +3483,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const br = func.air.instructions.items(.data)[inst].br;
     const block = func.blocks.get(br.block_inst).?;

     // if operand has codegen bits we should break with a value
-    if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) {
+    if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
         const operand = try func.resolveInst(br.operand);
         try func.lowerToStack(operand);

@@ -3490,16 +3510,17 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

     const operand = try func.resolveInst(ty_op.operand);
     const operand_ty = func.air.typeOf(ty_op.operand);
+    const mod = func.bin_file.base.options.module.?;

     const result = result: {
-        if (operand_ty.zigTypeTag() == .Bool) {
+        if (operand_ty.zigTypeTag(mod) == .Bool) {
             try func.emitWValue(operand);
             try func.addTag(.i32_eqz);
             const not_tmp = try func.allocLocal(operand_ty);
             try func.addLabel(.local_set, not_tmp.local.value);
             break :result not_tmp;
         } else {
-            const operand_bits = operand_ty.intInfo(func.target).bits;
+            const operand_bits = operand_ty.intInfo(mod).bits;
             const wasm_bits = toWasmBits(operand_bits) orelse {
                 return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits});
             };
@@ -3566,16 +3587,17 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }

 fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
+    const mod = func.bin_file.base.options.module.?;
     // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
     if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
     if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand;
-    if (wanted_ty.bitSize(func.target) > 64) return operand;
-    assert((wanted_ty.isInt() and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt()));
+    if (wanted_ty.bitSize(mod) > 64) return operand;
+    assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod)));

     const opcode = buildOpcode(.{
         .op = .reinterpret,
-        .valtype1 = typeToValtype(wanted_ty, func.target),
-        .valtype2 = typeToValtype(given_ty, func.target),
+        .valtype1 = typeToValtype(wanted_ty, mod),
+        .valtype2 = typeToValtype(given_ty, mod),
     });
     try
func.emitWValue(operand); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); @@ -3609,19 +3631,20 @@ fn structFieldPtr( struct_ty: Type, index: u32, ) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; const result_ty = func.air.typeOfIndex(inst); const offset = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { if (result_ty.ptrInfo().data.host_size != 0) { break :offset @as(u32, 0); } - break :offset struct_ty.packedStructFieldByteOffset(index, func.target); + break :offset struct_ty.packedStructFieldByteOffset(index, mod); }, .Union => 0, else => unreachable, }, - else => struct_ty.structFieldOffset(index, func.target), + else => struct_ty.structFieldOffset(index, mod), }; // save a load and store when we can simply reuse the operand if (offset == 0) { @@ -3636,6 +3659,7 @@ fn structFieldPtr( } fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data; @@ -3643,15 +3667,15 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); const result = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => result: { const struct_obj = struct_ty.castTag(.@"struct").?.data; - const offset = struct_obj.packedFieldBitOffset(func.target, field_index); + const offset = struct_obj.packedFieldBitOffset(mod, field_index); const backing_ty = struct_obj.backing_int_ty; - const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse { + const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse { return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); }; const const_wvalue = if (wasm_bits == 32) @@ -3667,25 +3691,17 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else try func.binOp(operand, const_wvalue, backing_ty, .shr); - if (field_ty.zigTypeTag() == .Float) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime() and struct_obj.fields.count() == 1) { + } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) { // In this case we do not have to perform any transformations, // we can simply reuse the operand. 
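+                    // A lone pointer field spans the whole backing integer, so no
+                    // shift or truncation is needed.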
break :result func.reuseOperand(struct_field.struct_operand, operand); - } else if (field_ty.isPtrAtRuntime()) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); break :result try truncated.toLocal(func, field_ty); } @@ -3693,8 +3709,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try truncated.toLocal(func, field_ty); }, .Union => result: { - if (isByRef(struct_ty, func.target)) { - if (!isByRef(field_ty, func.target)) { + if (isByRef(struct_ty, mod)) { + if (!isByRef(field_ty, mod)) { const val = try func.load(operand, field_ty, 0); break :result try val.toLocal(func, field_ty); } else { @@ -3704,26 +3720,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, struct_ty.bitSize(func.target)), - }; - const union_int_type = Type.initPayload(&payload.base); - if (field_ty.zigTypeTag() == .Float) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod))); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime()) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); break :result try truncated.toLocal(func, field_ty); } @@ -3733,11 +3737,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }, else => result: { - const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)}); + const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse { + return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)}); }; - if (isByRef(field_ty, func.target)) { + if (isByRef(field_ty, mod)) { switch (operand) { .stack_offset => |stack_offset| { break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } }; @@ -3754,6 +3757,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; // result type is always 'noreturn' const blocktype = wasm.block_empty; const pl_op = 
func.air.instructions.items(.data)[inst].pl_op;
@@ -3787,7 +3791,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     errdefer func.gpa.free(values);

     for (items, 0..) |ref, i| {
-        const item_val = func.air.value(ref).?;
+        const item_val = func.air.value(ref, mod).?;
         const int_val = func.valueAsI32(item_val, target_ty);
         if (lowest_maybe == null or int_val < lowest_maybe.?) {
             lowest_maybe = int_val;
@@ -3810,7 +3814,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // When the target is an integer size larger than u32, we have no way to use the value
     // as an index, therefore we also use an if/else-chain for those cases.
     // TODO: Benchmark this to find a proper value; LLVM seems to draw the line at '40~45'.
-    const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32;
+    const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32;

     const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len];
     const has_else_body = else_body.len != 0;
@@ -3855,7 +3859,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 // for errors that are not present in any branch. This is fine as this default
                 // case will never be hit for those cases but we do save runtime cost and size
                 // by using a jump table for this instead of if-else chains.
-                break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable;
+                break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable;
             };
             func.mir_extra.appendAssumeCapacity(idx);
         } else if (has_else_body) {
@@ -3866,10 +3870,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {

     const signedness: std.builtin.Signedness = blk: {
         // by default we treat the operand type as unsigned (i.e. bools and enum values)
-        if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
+        if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned;

         // in case of an actual integer, we emit the correct signedness
-        break :blk target_ty.intInfo(func.target).signedness;
+        break :blk target_ty.intInfo(mod).signedness;
     };
     try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
@@ -3882,7 +3886,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         const val = try func.lowerConstant(case.values[0].value, target_ty);
         try func.emitWValue(val);
         const opcode = buildOpcode(.{
-            .valtype1 = typeToValtype(target_ty, func.target),
+            .valtype1 = typeToValtype(target_ty, mod),
             .op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
.signedness = signedness, }); @@ -3896,7 +3900,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(value.value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, func.target), + .valtype1 = typeToValtype(target_ty, mod), .op = .eq, .signedness = signedness, }); @@ -3933,6 +3937,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const err_union_ty = func.air.typeOf(un_op); @@ -3948,10 +3953,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } try func.emitWValue(operand); - if (pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)), - .alignment = Type.anyerror.abiAlignment(func.target), + .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)), + .alignment = Type.anyerror.abiAlignment(mod), }); } @@ -3967,6 +3972,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -3975,15 +3981,15 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const payload_ty = err_ty.errorUnionPayload(); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (op_is_ptr) { break :result func.reuseOperand(ty_op.operand, operand); } break :result WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)); - if (op_is_ptr or isByRef(payload_ty, func.target)) { + const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + if (op_is_ptr or isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -3994,6 +4000,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo } fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4006,17 +4013,18 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) break :result WValue{ .imm32 = 0 }; } - if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target))); + const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod))); break :result try error_val.toLocal(func, Type.anyerror); }; func.finishAir(inst, result, &.{ty_op.operand}); } fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod 
= func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4024,18 +4032,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void const pl_ty = func.air.typeOf(ty_op.operand); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); try func.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. try func.emitWValue(err_union); try func.addImm32(0); - const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target)); + const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); break :result err_union; }; @@ -4043,6 +4051,7 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void } fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4050,17 +4059,17 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pl_ty = err_ty.errorUnionPayload(); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target))); + try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target)); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); + const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(mod)); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -4074,15 +4083,16 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.getRefType(ty_op.ty); const operand = try func.resolveInst(ty_op.operand); const operand_ty = func.air.typeOf(ty_op.operand); - if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) { return func.fail("todo Wasm intcast for vectors", .{}); } - if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) { + if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(func.target))).?; - const wanted_bits = 
toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+    const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?;
+    const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;

     const result = if (op_bits == wanted_bits)
         func.reuseOperand(ty_op.operand, operand)
     else
@@ -4096,8 +4106,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 /// Asserts type's bitsize <= 128
 /// NOTE: May leave the result on the top of the stack.
 fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
-    const given_bitsize = @intCast(u16, given.bitSize(func.target));
-    const wanted_bitsize = @intCast(u16, wanted.bitSize(func.target));
+    const mod = func.bin_file.base.options.module.?;
+    const given_bitsize = @intCast(u16, given.bitSize(mod));
+    const wanted_bitsize = @intCast(u16, wanted.bitSize(mod));
     assert(given_bitsize <= 128);
     assert(wanted_bitsize <= 128);

@@ -4110,7 +4121,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
         try func.addTag(.i32_wrap_i64);
     } else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) {
         try func.emitWValue(operand);
-        try func.addTag(if (wanted.isSignedInt()) .i64_extend_i32_s else .i64_extend_i32_u);
+        try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u);
     } else if (wanted_bits == 128) {
         // for 128bit integers we store the integer in the virtual stack, rather than a local
         const stack_ptr = try func.allocStack(wanted);
@@ -4119,14 +4130,14 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro

         // for 32 bit integers, we first coerce the value into a 64 bit integer before storing it
         // meaning fewer store operations are required.
         const lhs = if (op_bits == 32) blk: {
-            break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64);
+            break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64);
         } else operand;

         // store msb first
         try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset());

         // For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value
-        if (wanted.isSignedInt()) {
+        if (wanted.isSignedInt(mod)) {
             try func.emitWValue(stack_ptr);
             const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
             try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset());
@@ -4154,16 +4165,16 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:

 /// For a given type and operand, checks if it's considered `null`.
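+/// Non-pointer-like optionals keep a non-null flag byte after the payload
+/// (when the payload has runtime bits); that byte is what gets compared here.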
/// NOTE: Leaves the result on the stack fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; try func.emitWValue(operand); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); - if (!optional_ty.optionalReprIsPayload()) { + if (!optional_ty.optionalReprIsPayload(mod)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)}); + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)}); }; try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } @@ -4183,18 +4194,19 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod } fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const opt_ty = func.air.typeOf(ty_op.operand); const payload_ty = func.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.finishAir(inst, .none, &.{ty_op.operand}); } const result = result: { const operand = try func.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand); + if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand); - if (isByRef(payload_ty, func.target)) { + if (isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, 0, .new); } @@ -4209,10 +4221,11 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const opt_ty = func.air.typeOf(ty_op.operand).childType(); + const mod = func.bin_file.base.options.module.?; const result = result: { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4222,22 +4235,22 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const opt_ty = func.air.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { return 
func.finishAir(inst, operand, &.{ty_op.operand}); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)}); }; try func.emitWValue(operand); @@ -4251,9 +4264,10 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const payload_ty = func.air.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const non_null_bit = try func.allocStack(Type.initTag(.u1)); try func.emitWValue(non_null_bit); try func.addImm32(1); @@ -4263,12 +4277,11 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const op_ty = func.air.typeOfIndex(inst); - if (op_ty.optionalReprIsPayload()) { + if (op_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)}); }; // Create optional type, set the non-null bit, and store the operand inside the optional type @@ -4314,7 +4327,8 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); // load pointer onto stack _ = try func.load(slice, Type.usize, 0); @@ -4328,7 +4342,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result_ptr.local.value); - const result = if (!isByRef(elem_ty, func.target)) result: { + const result = if (!isByRef(elem_ty, mod)) result: { const elem_val = try func.load(result_ptr, elem_ty, 0); break :result try elem_val.toLocal(func, elem_ty); } else result_ptr; @@ -4341,7 +4355,8 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); @@ -4389,13 +4404,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Truncates a given operand to a given type, discarding any overflown bits. /// NOTE: Resulting value is left on the stack. 
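+/// Lowered as an `intcast` to the wanted type, followed by a `wrapOperand`
+/// mask when the destination width is not a native wasm width.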
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue { - const given_bits = @intCast(u16, given_ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const given_bits = @intCast(u16, given_ty.bitSize(mod)); if (toWasmBits(given_bits) == null) { return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits}); } var result = try func.intcast(operand, given_ty, wanted_ty); - const wanted_bits = @intCast(u16, wanted_ty.bitSize(func.target)); + const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod)); const wasm_bits = toWasmBits(wanted_bits).?; if (wasm_bits != wanted_bits) { result = try func.wrapOperand(result, wanted_ty); @@ -4412,6 +4428,7 @@ fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4422,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const slice_local = try func.allocStack(slice_ty); // store the array ptr in the slice - if (array_ty.hasRuntimeBitsIgnoreComptime()) { + if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.store(slice_local, operand, Type.usize, 0); } @@ -4454,7 +4471,8 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = ptr_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack if (ptr_ty.isSlice()) { @@ -4472,7 +4490,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_result = val: { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { break :val result; } defer result.free(func); // only free if it's not returned like above @@ -4489,7 +4507,8 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr_ty = func.air.typeOf(bin_op.lhs); const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); @@ -4513,6 +4532,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4524,13 +4544,13 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { else => ptr_ty.childType(), }; - const valtype = typeToValtype(Type.usize, func.target); + const valtype = typeToValtype(Type.usize, mod); const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul }); const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op }); try func.lowerToStack(ptr); try func.emitWValue(offset); - try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target)))); + try func.addImm32(@bitCast(i32, 
@intCast(u32, pointee_ty.abiSize(mod)))); try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); @@ -4572,7 +4592,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void /// this to wasm's memset instruction. When the feature is not present, /// we implement it manually. fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { - const abi_size = @intCast(u32, elem_ty.abiSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const abi_size = @intCast(u32, elem_ty.abiSize(mod)); // When bulk_memory is enabled, we lower it to wasm's memset instruction. // If not, we lower it ourselves. @@ -4666,24 +4687,25 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const array = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = array_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); - if (isByRef(array_ty, func.target)) { + if (isByRef(array_ty, mod)) { try func.lowerToStack(array); try func.emitWValue(index); try func.addImm32(@bitCast(i32, @intCast(u32, elem_size))); try func.addTag(.i32_mul); try func.addTag(.i32_add); } else { - std.debug.assert(array_ty.zigTypeTag() == .Vector); + std.debug.assert(array_ty.zigTypeTag(mod) == .Vector); switch (index) { inline .imm32, .imm64 => |lane| { - const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(func.target)) { - 8 => if (elem_ty.isSignedInt()) .i8x16_extract_lane_s else .i8x16_extract_lane_u, - 16 => if (elem_ty.isSignedInt()) .i16x8_extract_lane_s else .i16x8_extract_lane_u, - 32 => if (elem_ty.isInt()) .i32x4_extract_lane else .f32x4_extract_lane, - 64 => if (elem_ty.isInt()) .i64x2_extract_lane else .f64x2_extract_lane, + const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) { + 8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u, + 16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u, + 32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane, + 64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane, else => unreachable, }; @@ -4715,7 +4737,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { break :val result; } defer result.free(func); // only free if no longer needed and not returned like above @@ -4733,17 +4755,18 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const dest_ty = func.air.typeOfIndex(inst); const op_ty = func.air.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; - if (op_ty.abiSize(func.target) > 8) { + if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{}); } try func.emitWValue(operand); const op = buildOpcode(.{ .op = .trunc, - .valtype1 = typeToValtype(dest_ty, func.target), - .valtype2 = typeToValtype(op_ty, func.target), - .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(dest_ty, mod), + .valtype2 = typeToValtype(op_ty, mod), + .signedness = if (dest_ty.isSignedInt(mod)) .signed else 
.unsigned, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty); @@ -4757,17 +4780,18 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const dest_ty = func.air.typeOfIndex(inst); const op_ty = func.air.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; - if (op_ty.abiSize(func.target) > 8) { + if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{}); } try func.emitWValue(operand); const op = buildOpcode(.{ .op = .convert, - .valtype1 = typeToValtype(dest_ty, func.target), - .valtype2 = typeToValtype(op_ty, func.target), - .signedness = if (op_ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(dest_ty, mod), + .valtype2 = typeToValtype(op_ty, mod), + .signedness = if (op_ty.isSignedInt(mod)) .signed else .unsigned, }); try func.addTag(Mir.Inst.Tag.fromOpcode(op)); @@ -4777,18 +4801,19 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.air.typeOfIndex(inst); const elem_ty = ty.childType(); - if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: { + if (determineSimdStoreStrategy(ty, mod) == .direct) blk: { switch (operand) { // when the operand lives in the linear memory section, we can directly // load and splat the value at once. Meaning we do not first have to load // the scalar value onto the stack. .stack_offset, .memory, .memory_offset => { - const opcode = switch (elem_ty.bitSize(func.target)) { + const opcode = switch (elem_ty.bitSize(mod)) { 8 => std.wasm.simdOpcode(.v128_load8_splat), 16 => std.wasm.simdOpcode(.v128_load16_splat), 32 => std.wasm.simdOpcode(.v128_load32_splat), @@ -4803,18 +4828,18 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.mir_extra.appendSlice(func.gpa, &[_]u32{ opcode, operand.offset(), - elem_ty.abiAlignment(func.target), + elem_ty.abiAlignment(mod), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); try func.addLabel(.local_set, result.local.value); return func.finishAir(inst, result, &.{ty_op.operand}); }, .local => { - const opcode = switch (elem_ty.bitSize(func.target)) { + const opcode = switch (elem_ty.bitSize(mod)) { 8 => std.wasm.simdOpcode(.i8x16_splat), 16 => std.wasm.simdOpcode(.i16x8_splat), - 32 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), - 64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat), + 32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat), + 64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat), else => break :blk, // Cannot make use of simd-instructions }; const result = try func.allocLocal(ty); @@ -4828,14 +4853,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, } } - const elem_size = elem_ty.bitSize(func.target); + const elem_size = elem_ty.bitSize(mod); const vector_len = @intCast(usize, ty.vectorLen()); if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) { return 
func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size}); } const result = try func.allocStack(ty); - const elem_byte_size = @intCast(u32, elem_ty.abiSize(func.target)); + const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod)); var index: usize = 0; var offset: u32 = 0; while (index < vector_len) : (index += 1) { @@ -4855,6 +4880,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const inst_ty = func.air.typeOfIndex(inst); const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -4865,16 +4891,15 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mask_len = extra.mask_len; const child_ty = inst_ty.childType(); - const elem_size = child_ty.abiSize(func.target); + const elem_size = child_ty.abiSize(mod); - const module = func.bin_file.base.options.module.?; // TODO: One of them could be by ref; handle in loop - if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) { + if (isByRef(func.air.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { var buf: Value.ElemValueBuffer = undefined; - const value = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target); + const value = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); try func.emitWValue(result); @@ -4895,7 +4920,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lanes = std.mem.asBytes(operands[1..]); for (0..@intCast(usize, mask_len)) |index| { var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target); + const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); const base_index = if (mask_elem >= 0) @intCast(u8, @intCast(i64, elem_size) * mask_elem) else @@ -4930,13 +4955,14 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ty = func.air.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]); + const mod = func.bin_file.base.options.module.?; const result: WValue = result_value: { - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Array => { const result = try func.allocStack(result_ty); const elem_ty = result_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(func.target)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const sentinel = if (result_ty.sentinel()) |sent| blk: { break :blk try func.lowerConstant(sent, elem_ty); } else null; @@ -4944,7 +4970,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When the element type is by reference, we must copy the entire // value. It is therefore safer to move the offset pointer and store // each value individually, instead of using store offsets. - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { // copy stack pointer into a temporary local, which is // moved for each element to store each value in the right position. 
const offset = try func.buildPointerOffset(result, 0, .new); @@ -4974,7 +5000,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, .Struct => switch (result_ty.containerLayout()) { .Packed => { - if (isByRef(result_ty, func.target)) { + if (isByRef(result_ty, mod)) { return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{}); } const struct_obj = result_ty.castTag(.@"struct").?.data; @@ -4983,7 +5009,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // ensure the result is zero'd const result = try func.allocLocal(backing_type); - if (struct_obj.backing_int_ty.bitSize(func.target) <= 32) + if (struct_obj.backing_int_ty.bitSize(mod) <= 32) try func.addImm32(0) else try func.addImm64(0); @@ -4992,20 +5018,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var current_bit: u16 = 0; for (elements, 0..) |elem, elem_index| { const field = fields[elem_index]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const shift_val = if (struct_obj.backing_int_ty.bitSize(func.target) <= 32) + const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32) WValue{ .imm32 = current_bit } else WValue{ .imm64 = current_bit }; const value = try func.resolveInst(elem); - const value_bit_size = @intCast(u16, field.ty.bitSize(func.target)); - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = value_bit_size, - }; - const int_ty = Type.initPayload(&int_ty_payload.base); + const value_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const int_ty = try mod.intType(.unsigned, value_bit_size); // load our current result on stack so we can perform all transformations // using only stack values. Saving the cost of loads and stores. @@ -5027,10 +5049,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(result_ty); const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset for (elements, 0..) 
|elem, elem_index| { - if (result_ty.structFieldValueComptime(elem_index) != null) continue; + if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue; const elem_ty = result_ty.structFieldType(elem_index); - const elem_size = @intCast(u32, elem_ty.abiSize(func.target)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const value = try func.resolveInst(elem); try func.store(offset, value, elem_ty, 0); @@ -5058,12 +5080,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data; const result = result: { const union_ty = func.air.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(func.target); + const layout = union_ty.unionGetLayout(mod); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = union_obj.fields.values()[extra.field_index]; const field_name = union_obj.fields.keys()[extra.field_index]; @@ -5082,15 +5105,15 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (layout.tag_size == 0) { break :result WValue{ .none = {} }; } - assert(!isByRef(union_ty, func.target)); + assert(!isByRef(union_ty, mod)); break :result tag_int; } - if (isByRef(union_ty, func.target)) { + if (isByRef(union_ty, mod)) { const result_ptr = try func.allocStack(union_ty); const payload = try func.resolveInst(extra.init); if (layout.tag_align >= layout.payload_align) { - if (isByRef(field.ty, func.target)) { + if (isByRef(field.ty, mod)) { const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); try func.store(payload_ptr, payload, field.ty, 0); } else { @@ -5114,26 +5137,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result result_ptr; } else { const operand = try func.resolveInst(extra.init); - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, union_ty.bitSize(func.target)), - }; - const union_int_type = Type.initPayload(&payload.base); - if (field.ty.zigTypeTag() == .Float) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field.ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod))); + if (field.ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod))); const bitcasted = try func.bitcast(field.ty, int_type, operand); const casted = try func.trunc(bitcasted, int_type, union_int_type); break :result try casted.toLocal(func, field.ty); - } else if (field.ty.isPtrAtRuntime()) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field.ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + } else if (field.ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod))); const casted = try func.intcast(operand, int_type, union_int_type); break :result try casted.toLocal(func, field.ty); } @@ -5171,7 +5182,8 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void { } fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) 
InnerError!WValue { - assert(operand_ty.hasRuntimeBitsIgnoreComptime()); + const mod = func.bin_file.base.options.module.?; + assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod)); assert(op == .eq or op == .neq); var buf: Type.Payload.ElemType = undefined; const payload_ty = operand_ty.optionalChild(&buf); @@ -5189,7 +5201,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: _ = try func.load(lhs, payload_ty, 0); _ = try func.load(rhs, payload_ty, 0); - const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) }); + const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); try func.addLabel(.br_if, 0); @@ -5207,10 +5219,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: /// NOTE: Leaves the result of the comparison on top of the stack. /// TODO: Lower this to compiler_rt call when bitsize > 128 fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - assert(operand_ty.abiSize(func.target) >= 16); + const mod = func.bin_file.base.options.module.?; + assert(operand_ty.abiSize(mod) >= 16); assert(!(lhs != .stack and rhs == .stack)); - if (operand_ty.bitSize(func.target) > 128) { - return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(func.target)}); + if (operand_ty.bitSize(mod) > 128) { + return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)}); } var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); @@ -5233,7 +5246,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std } }, else => { - const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64; + const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64; // leave those values on top of the stack for '.select' const lhs_low_bit = try func.load(lhs, Type.u64, 8); const rhs_low_bit = try func.load(rhs, Type.u64, 8); @@ -5248,10 +5261,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std } fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const un_ty = func.air.typeOf(bin_op.lhs).childType(); const tag_ty = func.air.typeOf(bin_op.rhs); - const layout = un_ty.unionGetLayout(func.target); + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); const union_ptr = try func.resolveInst(bin_op.lhs); @@ -5271,11 +5285,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const un_ty = func.air.typeOf(ty_op.operand); const tag_ty = func.air.typeOfIndex(inst); - const layout = un_ty.unionGetLayout(func.target); + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand}); const operand = try func.resolveInst(ty_op.operand); @@ -5375,6 +5390,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod =
func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const err_set_ty = func.air.typeOf(ty_op.operand).childType(); @@ -5386,26 +5402,27 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi operand, .{ .imm32 = 0 }, Type.anyerror, - @intCast(u32, errUnionErrorOffset(payload_ty, func.target)), + @intCast(u32, errUnionErrorOffset(payload_ty, mod)), ); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new); + break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new); }; func.finishAir(inst, result, &.{ty_op.operand}); } fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const field_ptr = try func.resolveInst(extra.field_ptr); const parent_ty = func.air.getRefType(ty_pl.ty).childType(); - const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target); + const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); const result = if (field_offset != 0) result: { const base = try func.buildPointerOffset(field_ptr, 0, .new); @@ -5428,6 +5445,7 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue } fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const dst = try func.resolveInst(bin_op.lhs); const dst_ty = func.air.typeOf(bin_op.lhs); @@ -5437,16 +5455,16 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const len = switch (dst_ty.ptrSize()) { .Slice => blk: { const slice_len = try func.sliceLen(dst); - if (ptr_elem_ty.abiSize(func.target) != 1) { + if (ptr_elem_ty.abiSize(mod) != 1) { try func.emitWValue(slice_len); - try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(func.target)) }); + try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) }); try func.addTag(.i32_mul); try func.addLabel(.local_set, slice_len.local.value); } break :blk slice_len; }, .One => @as(WValue, .{ - .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(func.target)), + .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)), }), .C, .Many => unreachable, }; @@ -5472,12 +5490,13 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const op_ty = func.air.typeOf(ty_op.operand); const result_ty = func.air.typeOfIndex(inst); + const mod = func.bin_file.base.options.module.?; - if (op_ty.zigTypeTag() == .Vector) { + if (op_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement @popCount for vectors", .{}); } - const int_info = op_ty.intInfo(func.target); + const int_info = op_ty.intInfo(mod); const bits = int_info.bits; const wasm_bits = toWasmBits(bits) orelse { return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits}); @@ -5527,7 +5546,8 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // to make 
a copy of the ptr+value but can point towards them directly. const error_table_symbol = try func.bin_file.getErrorTableSymbol(); const name_ty = Type.initTag(.const_slice_u8_sentinel_0); - const abi_size = name_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const abi_size = name_ty.abiSize(mod); const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation try func.emitWValue(error_name_value); @@ -5566,12 +5586,13 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const lhs_op = try func.resolveInst(extra.lhs); const rhs_op = try func.resolveInst(extra.rhs); const lhs_ty = func.air.typeOf(extra.lhs); + const mod = func.bin_file.base.options.module.?; - if (lhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); } - const int_info = lhs_ty.intInfo(func.target); + const int_info = lhs_ty.intInfo(mod); const is_signed = int_info.signedness == .signed; const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits}); @@ -5630,15 +5651,16 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(func.target)); + const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; assert(op == .add or op == .sub); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits != 128) { return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits}); @@ -5701,6 +5723,7 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, } fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Bin, ty_pl.payload).data; @@ -5709,11 +5732,11 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs_ty = func.air.typeOf(extra.lhs); const rhs_ty = func.air.typeOf(extra.rhs); - if (lhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); } - const int_info = lhs_ty.intInfo(func.target); + const int_info = lhs_ty.intInfo(mod); const is_signed = int_info.signedness == .signed; const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits}); @@ -5721,7 +5744,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // Ensure rhs is coerced to lhs as they must have the same WebAssembly types // before we can perform any binary operation. 
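// Hedged aside, not part of the patch: a worked example of the semantics this
// lowering preserves. The wasm shift is computed in a wider valtype (i32 for
// a u8), but the wrapped result and the overflow bit still reflect the
// narrow Zig type:
test "shl with overflow wraps to the narrow type" {
    const expectEqual = @import("std").testing.expectEqual;
    const res = @shlWithOverflow(@as(u8, 200), 1);
    try expectEqual(@as(u8, 144), res[0]); // 400 truncated to 8 bits
    try expectEqual(@as(u1, 1), res[1]); // a bit was shifted out
}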
- const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(func.target).bits).?; + const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?; const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: { const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try rhs_casted.toLocal(func, lhs_ty); @@ -5750,7 +5773,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(func.target)); + const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); @@ -5763,8 +5786,9 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); const lhs_ty = func.air.typeOf(extra.lhs); + const mod = func.bin_file.base.options.module.?; - if (lhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); } @@ -5773,7 +5797,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1)); defer overflow_bit.free(func); - const int_info = lhs_ty.intInfo(func.target); + const int_info = lhs_ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits}); }; @@ -5924,7 +5948,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); try func.store(result_ptr, bin_op_local, lhs_ty, 0); - const offset = @intCast(u32, lhs_ty.abiSize(func.target)); + const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); @@ -5934,11 +5958,12 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ty = func.air.typeOfIndex(inst); - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{}); } - if (ty.abiSize(func.target) > 16) { + if (ty.abiSize(mod) > 16) { return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{}); } @@ -5954,7 +5979,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE try func.addTag(.select); // store result in local - const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty; + const result_ty = if (isByRef(ty, mod)) Type.u32 else ty; const result = try func.allocLocal(result_ty); try func.addLabel(.local_set, result.local.value); func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); @@ -5965,7 +5990,8 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data; const ty = func.air.typeOfIndex(inst); - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@mulAdd` for vectors", .{}); } @@ -5998,12 +6024,13 @@ fn airClz(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { const ty = func.air.typeOf(ty_op.operand); const result_ty = func.air.typeOfIndex(inst); - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@clz` for vectors", .{}); } const operand = try func.resolveInst(ty_op.operand); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits}); }; @@ -6051,12 +6078,13 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.typeOf(ty_op.operand); const result_ty = func.air.typeOfIndex(inst); - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@ctz` for vectors", .{}); } const operand = try func.resolveInst(ty_op.operand); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits) orelse { return func.fail("TODO: `@ctz` for integers with bitsize '{d}'", .{int_info.bits}); }; @@ -6174,12 +6202,13 @@ fn lowerTry( err_union_ty: Type, operand_is_ptr: bool, ) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; if (operand_is_ptr) { return func.fail("TODO: lowerTry for pointers", .{}); } const pl_ty = err_union_ty.errorUnionPayload(); - const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(); + const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod); if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { // Block we can jump out of when error is not set @@ -6188,10 +6217,10 @@ fn lowerTry( // check if the error tag is set for the error union.
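// Hedged aside, not part of the patch: the tag check below is the lowered
// form of Zig's `try`. If the u16 error tag is nonzero the error is returned
// to the caller, otherwise control continues with the payload:
fn lowerTrySketch(eu: anyerror!u32) anyerror!u32 {
    const payload = try eu; // error tag != 0 -> return the error; else payload
    return payload;
}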
try func.emitWValue(err_union); if (pl_has_bits) { - const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target)); + const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); try func.addMemArg(.i32_load16_u, .{ .offset = err_union.offset() + err_offset, - .alignment = Type.anyerror.abiAlignment(func.target), + .alignment = Type.anyerror.abiAlignment(mod), }); } try func.addTag(.i32_eqz); @@ -6213,8 +6242,8 @@ fn lowerTry( return WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)); - if (isByRef(pl_ty, func.target)) { + const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod)); + if (isByRef(pl_ty, mod)) { return buildPointerOffset(func, err_union, pl_offset, .new); } const payload = try func.load(err_union, pl_ty, pl_offset); @@ -6226,11 +6255,12 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); + const mod = func.bin_file.base.options.module.?; - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: @byteSwap for vectors", .{}); } - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); // bytes are no-op if (int_info.bits == 8) { @@ -6292,13 +6322,14 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ty = func.air.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const result = if (ty.isSignedInt()) + const result = if (ty.isSignedInt(mod)) try func.divSigned(lhs, rhs, ty) else try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); @@ -6306,13 +6337,14 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ty = func.air.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const div_result = if (ty.isSignedInt()) + const div_result = if (ty.isSignedInt(mod)) try func.divSigned(lhs, rhs, ty) else try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); @@ -6328,15 +6360,16 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; + const mod = func.bin_file.base.options.module.?; const ty = func.air.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - if (ty.isUnsignedInt()) { + if (ty.isUnsignedInt(mod)) { const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty); return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); - } else if (ty.isSignedInt()) { - const int_bits = ty.intInfo(func.target).bits; + } else if (ty.isSignedInt(mod)) { + const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { return func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits}); }; @@ -6414,7 +6447,8 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn divSigned(func: *CodeGen, lhs: WValue, rhs: 
WValue, ty: Type) InnerError!WValue { - const int_bits = ty.intInfo(func.target).bits; + const mod = func.bin_file.base.options.module.?; + const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits}); }; @@ -6441,7 +6475,8 @@ fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WVal /// Retrieves the absolute value of a signed integer /// NOTE: Leaves the result value on the stack. fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - const int_bits = ty.intInfo(func.target).bits; + const mod = func.bin_file.base.options.module.?; + const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits}); }; @@ -6476,11 +6511,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { assert(op == .add or op == .sub); const bin_op = func.air.instructions.items(.data)[inst].bin_op; + const mod = func.bin_file.base.options.module.?; const ty = func.air.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { @@ -6523,7 +6559,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue { - const int_info = ty.intInfo(func.target); + const mod = func.bin_file.base.options.module.?; + const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits).?; const is_wasm_bits = wasm_bits == int_info.bits; @@ -6588,8 +6625,9 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; + const mod = func.bin_file.base.options.module.?; const ty = func.air.typeOfIndex(inst); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits}); @@ -6707,12 +6745,13 @@ fn callIntrinsic( }; // Always pass over C-ABI - var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target); + const mod = func.bin_file.base.options.module.?; + var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod); defer func_type.deinit(func.gpa); const func_type_index = try func.bin_file.putOrGetFuncType(func_type); try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index); - const want_sret_param = firstParamSRet(.C, return_type, func.target); + const want_sret_param = firstParamSRet(.C, return_type, mod); // if we want return as first param, we allocate a pointer to stack, // and emit it as our first argument const sret = if (want_sret_param) blk: { @@ -6724,14 +6763,14 @@ fn callIntrinsic( // Lower all arguments to the stack before we call our function for (args, 0..) 
|arg, arg_i| { assert(!(want_sret_param and arg == .stack)); - assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime()); + assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod)); try func.lowerArg(.C, param_types[arg_i], arg); } // Actually call our intrinsic try func.addLabel(.call, symbol_index); - if (!return_type.hasRuntimeBitsIgnoreComptime()) { + if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { return WValue.none; } else if (return_type.isNoReturn()) { try func.addTag(.@"unreachable"); @@ -6759,15 +6798,15 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { + const mod = func.bin_file.base.options.module.?; const enum_decl_index = enum_ty.getOwnerDecl(); - const module = func.bin_file.base.options.module.?; var arena_allocator = std.heap.ArenaAllocator.init(func.gpa); defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try module.declPtr(enum_decl_index).getFullyQualifiedName(module); - defer module.gpa.free(fqn); + const fqn = try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod); + defer mod.gpa.free(fqn); const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); // check if we already generated code for this. @@ -6775,10 +6814,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { return loc.index; } - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer); + const int_tag_ty = enum_ty.intTagType(); - if (int_tag_ty.bitSize(func.target) > 64) { + if (int_tag_ty.bitSize(mod) > 64) { return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{}); } @@ -6806,9 +6844,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { .data = @intCast(u64, tag_name.len), }; const name_ty = Type.initPayload(&name_ty_payload.base); - const string_bytes = &module.string_literal_bytes; - try string_bytes.ensureUnusedCapacity(module.gpa, tag_name.len); - const gop = try module.string_literal_table.getOrPutContextAdapted(module.gpa, tag_name, Module.StringLiteralAdapter{ + const string_bytes = &mod.string_literal_bytes; + try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len); + const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{ .bytes = string_bytes, }, Module.StringLiteralContext{ .bytes = string_bytes, @@ -6929,7 +6967,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.end)); const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, func.target); + const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } @@ -6944,11 +6982,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len); defer values.deinit(); - const module = func.bin_file.base.options.module.?; + const mod = func.bin_file.base.options.module.?; var lowest: ?u32 = null; var highest: ?u32 = null; for (names) |name| { - const err_int = module.global_error_set.get(name).?; + const err_int = mod.global_error_set.get(name).?; if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; @@ -7019,6 +7057,7 @@ inline fn useAtomicFeature(func: 
*const CodeGen) bool { } fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data; @@ -7037,7 +7076,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr_operand); try func.lowerToStack(expected_val); try func.lowerToStack(new_val); - try func.addAtomicMemArg(switch (ty.abiSize(func.target)) { + try func.addAtomicMemArg(switch (ty.abiSize(mod)) { 1 => .i32_atomic_rmw8_cmpxchg_u, 2 => .i32_atomic_rmw16_cmpxchg_u, 4 => .i32_atomic_rmw_cmpxchg, @@ -7045,14 +7084,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}), }, .{ .offset = ptr_operand.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); try func.addLabel(.local_tee, val_local.local.value); _ = try func.cmp(.stack, expected_val, ty, .eq); try func.addLabel(.local_set, cmp_result.local.value); break :val val_local; } else val: { - if (ty.abiSize(func.target) > 8) { + if (ty.abiSize(mod) > 8) { return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{}); } const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty); @@ -7068,7 +7107,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :val ptr_val; }; - const result_ptr = if (isByRef(result_ty, func.target)) val: { + const result_ptr = if (isByRef(result_ty, mod)) val: { try func.emitWValue(cmp_result); try func.addImm32(-1); try func.addTag(.i32_xor); @@ -7076,7 +7115,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_and); const and_result = try WValue.toLocal(.stack, func, Type.bool); const result_ptr = try func.allocStack(result_ty); - try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(func.target))); + try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod))); try func.store(result_ptr, ptr_val, ty, 0); break :val result_ptr; } else val: { @@ -7091,12 +7130,13 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const atomic_load = func.air.instructions.items(.data)[inst].atomic_load; const ptr = try func.resolveInst(atomic_load.ptr); const ty = func.air.typeOfIndex(inst); if (func.useAtomicFeature()) { - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => .i32_atomic_load8_u, 2 => .i32_atomic_load16_u, 4 => .i32_atomic_load, @@ -7106,7 +7146,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr); try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); } else { _ = try func.load(ptr, ty, 0); @@ -7117,6 +7157,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const pl_op = func.air.instructions.items(.data)[inst].pl_op; const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data; @@ -7140,7 +7181,7 @@ fn 
airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr); try func.emitWValue(value); if (op == .Nand) { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; const and_res = try func.binOp(value, operand, ty, .@"and"); if (wasm_bits == 32) @@ -7157,7 +7198,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.select); } try func.addAtomicMemArg( - switch (ty.abiSize(func.target)) { + switch (ty.abiSize(mod)) { 1 => .i32_atomic_rmw8_cmpxchg_u, 2 => .i32_atomic_rmw16_cmpxchg_u, 4 => .i32_atomic_rmw_cmpxchg, @@ -7166,7 +7207,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }, ); const select_res = try func.allocLocal(ty); @@ -7185,7 +7226,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => { try func.emitWValue(ptr); try func.emitWValue(operand); - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => switch (op) { .Xchg => .i32_atomic_rmw8_xchg_u, .Add => .i32_atomic_rmw8_add_u, @@ -7226,7 +7267,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); const result = try WValue.toLocal(.stack, func, ty); return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand }); @@ -7255,7 +7296,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .Xor => .xor, else => unreachable, }); - if (ty.isInt() and (op == .Add or op == .Sub)) { + if (ty.isInt(mod) and (op == .Add or op == .Sub)) { _ = try func.wrapOperand(.stack, ty); } try func.store(.stack, .stack, ty, ptr.offset()); @@ -7271,7 +7312,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.store(.stack, .stack, ty, ptr.offset()); }, .Nand => { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; try func.emitWValue(ptr); const and_res = try func.binOp(result, operand, ty, .@"and"); @@ -7302,6 +7343,7 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ptr = try func.resolveInst(bin_op.lhs); @@ -7310,7 +7352,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = ptr_ty.childType(); if (func.useAtomicFeature()) { - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => .i32_atomic_store8, 2 => .i32_atomic_store16, 4 => .i32_atomic_store, @@ -7321,7 +7363,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.lowerToStack(operand); try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); } else { try func.store(ptr, operand, ty, 0); diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 4692f65dd17a..7dd4425c01ea 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ 
-5,9 +5,11 @@ //! Note: Above mentioned document is not an official specification, therefore called a convention. const std = @import("std"); -const Type = @import("../../type.zig").Type; const Target = std.Target; +const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); + /// Defines how to pass a type as part of a function signature, /// both for parameters as well as return values. pub const Class = enum { direct, indirect, none }; @@ -19,12 +21,13 @@ const direct: [2]Class = .{ .direct, .none }; /// Classifies a given Zig type to determine how they must be passed /// or returned as value within a wasm function. /// When all elements result in `.none`, no value must be passed in or returned. -pub fn classifyType(ty: Type, target: Target) [2]Class { - if (!ty.hasRuntimeBitsIgnoreComptime()) return none; - switch (ty.zigTypeTag()) { +pub fn classifyType(ty: Type, mod: *const Module) [2]Class { + const target = mod.getTarget(); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; + switch (ty.zigTypeTag(mod)) { .Struct => { if (ty.containerLayout() == .Packed) { - if (ty.bitSize(target) <= 64) return direct; + if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } // When the struct type is non-scalar @@ -32,14 +35,14 @@ pub fn classifyType(ty: Type, target: Target) [2]Class { // When the struct's alignment is non-natural const field = ty.structFields().values()[0]; if (field.abi_align != 0) { - if (field.abi_align > field.ty.abiAlignment(target)) { + if (field.abi_align > field.ty.abiAlignment(mod)) { return memory; } } - return classifyType(field.ty, target); + return classifyType(field.ty, mod); }, .Int, .Enum, .ErrorSet, .Vector => { - const int_bits = ty.intInfo(target).bits; + const int_bits = ty.intInfo(mod).bits; if (int_bits <= 64) return direct; if (int_bits <= 128) return .{ .direct, .direct }; return memory; @@ -53,7 +56,7 @@ pub fn classifyType(ty: Type, target: Target) [2]Class { .Bool => return direct, .Array => return memory, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return direct; }, .Pointer => { @@ -62,13 +65,13 @@ pub fn classifyType(ty: Type, target: Target) [2]Class { }, .Union => { if (ty.containerLayout() == .Packed) { - if (ty.bitSize(target) <= 64) return direct; + if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); std.debug.assert(layout.tag_size == 0); if (ty.unionFields().count() > 1) return memory; - return classifyType(ty.unionFields().values()[0].ty, target); + return classifyType(ty.unionFields().values()[0].ty, mod); }, .ErrorUnion, .Frame, @@ -90,29 +93,29 @@ pub fn classifyType(ty: Type, target: Target) [2]Class { /// Returns the scalar type a given type can represent. /// Asserts given type can be represented as scalar, such as /// a struct with a single scalar field. 
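// Hedged aside, not part of the patch: e.g. a non-packed struct with a single
// scalar field is classified through that field, so a wrapper around a u32 is
// passed directly as a u32 (classifyType -> .{ .direct, .none }, scalarType
// -> u32, assuming the helpers behave as shown above):
test "single-field wrapper collapses to its scalar" {
    const Wrapper = struct { value: u32 };
    try @import("std").testing.expectEqual(@sizeOf(u32), @sizeOf(Wrapper));
}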
-pub fn scalarType(ty: Type, target: std.Target) Type { - switch (ty.zigTypeTag()) { +pub fn scalarType(ty: Type, mod: *const Module) Type { + switch (ty.zigTypeTag(mod)) { .Struct => { switch (ty.containerLayout()) { .Packed => { const struct_obj = ty.castTag(.@"struct").?.data; - return scalarType(struct_obj.backing_int_ty, target); + return scalarType(struct_obj.backing_int_ty, mod); }, else => { std.debug.assert(ty.structFieldCount() == 1); - return scalarType(ty.structFieldType(0), target); + return scalarType(ty.structFieldType(0), mod); }, } }, .Union => { if (ty.containerLayout() != .Packed) { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0 and layout.tag_size != 0) { - return scalarType(ty.unionTagTypeSafety().?, target); + return scalarType(ty.unionTagTypeSafety().?, mod); } std.debug.assert(ty.unionFields().count() == 1); } - return scalarType(ty.unionFields().values()[0].ty, target); + return scalarType(ty.unionFields().values()[0].ty, mod); }, else => return ty, } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b614200e4187..826bca226678 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -605,14 +605,14 @@ const FrameAlloc = struct { .ref_count = 0, }; } - fn initType(ty: Type, target: Target) FrameAlloc { - return init(.{ .size = ty.abiSize(target), .alignment = ty.abiAlignment(target) }); + fn initType(ty: Type, mod: *const Module) FrameAlloc { + return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) }); } }; const StackAllocation = struct { inst: ?Air.Inst.Index, - /// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*) + /// TODO do we need size? should be determined by inst.ty.abiSize(mod) size: u32, }; @@ -714,12 +714,12 @@ pub fn generate( function.args = call_info.args; function.ret_mcv = call_info.return_value; function.frame_allocs.set(@enumToInt(FrameIndex.ret_addr), FrameAlloc.init(.{ - .size = Type.usize.abiSize(function.target.*), - .alignment = @min(Type.usize.abiAlignment(function.target.*), call_info.stack_align), + .size = Type.usize.abiSize(mod), + .alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align), })); function.frame_allocs.set(@enumToInt(FrameIndex.base_ptr), FrameAlloc.init(.{ - .size = Type.usize.abiSize(function.target.*), - .alignment = @min(Type.usize.abiAlignment(function.target.*) * 2, call_info.stack_align), + .size = Type.usize.abiSize(mod), + .alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align), })); function.frame_allocs.set( @enumToInt(FrameIndex.args_frame), @@ -1565,6 +1565,7 @@ fn asmMemoryRegisterImmediate( } fn gen(self: *Self) InnerError!void { + const mod = self.bin_file.options.module.?; const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { try self.asmRegister(.{ ._, .push }, .rbp); @@ -1582,7 +1583,7 @@ fn gen(self: *Self) InnerError!void { // register which the callee is free to clobber. Therefore, we purposely // spill it to stack immediately. 
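// Hedged aside, not part of the patch: the spill slot allocated below is
// shaped by FrameAlloc.initType, i.e. the ABI size and alignment of usize,
// which on a 64-bit target is an 8-byte slot with 8-byte alignment:
test "usize spill slot shape on a 64-bit host" {
    const testing = @import("std").testing;
    if (@sizeOf(usize) == 8) try testing.expectEqual(@as(usize, 8), @alignOf(usize));
}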
const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(Type.usize, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod)); try self.genSetMem( .{ .frame = frame_index }, 0, @@ -1999,7 +2000,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { - switch (lazy_sym.ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lazy_sym.ty.zigTypeTag(mod)) { .Enum => { const enum_ty = lazy_sym.ty; wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)}); @@ -2127,8 +2129,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } self.finishAirResult(inst, result); @@ -2252,14 +2254,14 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { + const mod = self.bin_file.options.module.?; const ptr_ty = self.air.typeOfIndex(inst); const val_ty = ptr_ty.childType(); return self.allocFrameIndex(FrameAlloc.init(.{ - .size = math.cast(u32, val_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + .size = math.cast(u32, val_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)}); }, - .alignment = @max(ptr_ty.ptrAlignment(self.target.*), 1), + .alignment = @max(ptr_ty.ptrAlignment(mod), 1), })); } @@ -2272,19 +2274,19 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { } fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; if (reg_ok) need_mem: { - if (abi_size <= @as(u32, switch (ty.zigTypeTag()) { + if (abi_size <= @as(u32, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 16, 32, 64, 128 => 16, 80 => break :need_mem, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16, 80 => break :need_mem, @@ -2294,18 +2296,18 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b }, else => 8, })) { - if (self.register_manager.tryAllocReg(inst, regClassForType(ty))) |reg| { + if (self.register_manager.tryAllocReg(inst, regClassForType(ty, mod))) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } } - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, mod)); return .{ .load_frame = .{ .index = frame_index } }; } -fn regClassForType(ty: Type) RegisterManager.RegisterBitSet { - return switch 
(ty.zigTypeTag()) { +fn regClassForType(ty: Type, mod: *const Module) RegisterManager.RegisterBitSet { + return switch (ty.zigTypeTag(mod)) { .Float, .Vector => sse, else => gp, }; @@ -2449,7 +2451,8 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, regClassForType(ty)); + const mod = self.bin_file.options.module.?; + const reg = try self.register_manager.allocReg(null, regClassForType(ty, mod)); try self.genSetReg(reg, ty, mcv); return reg; } @@ -2464,7 +2467,8 @@ fn copyToRegisterWithInstTracking( ty: Type, mcv: MCValue, ) !MCValue { - const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty)); + const mod = self.bin_file.options.module.?; + const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty, mod)); try self.genSetReg(reg, ty, mcv); return MCValue{ .register = reg }; } @@ -2618,14 +2622,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.air.typeOf(ty_op.operand); - const src_int_info = src_ty.intInfo(self.target.*); + const src_int_info = src_ty.intInfo(mod); const dst_ty = self.air.typeOfIndex(inst); - const dst_int_info = dst_ty.intInfo(self.target.*); - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_int_info = dst_ty.intInfo(mod); + const abi_size = @intCast(u32, dst_ty.abiSize(mod)); const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; const extend = switch (src_int_info.signedness) { @@ -2670,14 +2675,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const high_bits = src_int_info.bits % 64; if (high_bits > 0) { - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (extend) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = high_bits, - }; - const high_ty = Type.initPayload(&high_pl.base); + const high_ty = try mod.intType(extend, high_bits); try self.truncateRegister(high_ty, high_reg); try self.genCopy(Type.usize, high_mcv, .{ .register = high_reg }); } @@ -2706,12 +2704,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { } fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.air.typeOfIndex(inst); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const result = result: { const src_mcv = try self.resolveInst(ty_op.operand); @@ -2724,10 +2723,10 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); - if (dst_ty.zigTypeTag() == .Vector) { - assert(src_ty.zigTypeTag() == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); - const dst_info = dst_ty.childType().intInfo(self.target.*); - const src_info = 
src_ty.childType().intInfo(self.target.*); + if (dst_ty.zigTypeTag(mod) == .Vector) { + assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); + const dst_info = dst_ty.childType().intInfo(mod); + const src_info = src_ty.childType().intInfo(mod); const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) { 8 => switch (src_info.bits) { 16 => switch (dst_ty.vectorLen()) { @@ -2775,7 +2774,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { }, }; const full_ty = Type.initPayload(&full_pl.base); - const full_abi_size = @intCast(u32, full_ty.abiSize(self.target.*)); + const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); const splat_addr_mcv: MCValue = switch (splat_mcv) { @@ -2831,6 +2830,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -2840,11 +2840,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const len = try self.resolveInst(bin_op.rhs); const len_ty = self.air.typeOf(bin_op.rhs); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(self.target.*)), + @intCast(i32, ptr_ty.abiSize(mod)), len_ty, len, ); @@ -2873,23 +2873,24 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void } fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { + const mod = self.bin_file.options.module.?; const air_tag = self.air.instructions.items(.tag); const air_data = self.air.instructions.items(.data); const dst_ty = self.air.typeOf(dst_air); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { .constant => { const src_val = self.air.values[air_data[inst].ty_pl.payload]; var space: Value.BigIntSpace = undefined; - const src_int = src_val.toBigInt(&space, self.target.*); + const src_int = src_val.toBigInt(&space, mod); return @intCast(u16, src_int.bitCountTwosComp()) + @boolToInt(src_int.positive and dst_info.signedness == .signed); }, .intcast => { const src_ty = self.air.typeOf(air_data[inst].ty_op.operand); - const src_info = src_ty.intInfo(self.target.*); + const src_info = src_ty.intInfo(mod); return @min(switch (src_info.signedness) { .signed => switch (dst_info.signedness) { .signed => src_info.bits, @@ -2908,20 +2909,18 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { } fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result = result: { const tag = self.air.instructions.items(.tag)[inst]; const dst_ty = self.air.typeOfIndex(inst); - switch (dst_ty.zigTypeTag()) { + switch (dst_ty.zigTypeTag(mod)) { .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs), else => {}, } - const dst_info = dst_ty.intInfo(self.target.*); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } 
}, .data = switch (tag) { + const dst_info = dst_ty.intInfo(mod); + const src_ty = try mod.intType(dst_info.signedness, switch (tag) { else => unreachable, .mul, .mulwrap => math.max3( self.activeIntBits(bin_op.lhs), @@ -2929,8 +2928,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { dst_info.bits / 2, ), .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits, - } }; - const src_ty = Type.initPayload(&src_pl.base); + }); try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); @@ -2942,6 +2940,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -2968,7 +2967,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -2994,7 +2993,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(self.target.*)), + .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)), }); try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); @@ -3005,14 +3004,14 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3020,6 +3019,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { } fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -3046,7 +3046,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3076,14 +3076,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3091,6 +3091,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { } fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = 
self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -3118,7 +3119,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(limit_lock); const reg_bits = self.regBitSize(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { try self.genSetReg(limit_reg, ty, lhs_mcv); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv); try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); @@ -3134,7 +3135,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { }; const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv); - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_mcv.register, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3145,12 +3146,13 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = result: { const tag = self.air.instructions.items(.tag)[inst]; const ty = self.air.typeOf(bin_op.lhs); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}), .Int => { try self.spillEflagsIfOccupied(); @@ -3160,7 +3162,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .sub_with_overflow => .sub, else => unreachable, }, bin_op.lhs, bin_op.rhs); - const int_info = ty.intInfo(self.target.*); + const int_info = ty.intInfo(mod); const cc: Condition = switch (int_info.signedness) { .unsigned => .c, .signed => .o, @@ -3177,16 +3179,16 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), Type.u1, .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), ty, partial_mcv, ); @@ -3194,7 +3196,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, @@ -3205,12 +3207,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = result: { const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement 
shl with overflow for Vector type", .{}), .Int => { try self.spillEflagsIfOccupied(); @@ -3219,7 +3222,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty); const partial_lock = switch (partial_mcv) { @@ -3249,16 +3252,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), tuple_ty.structFieldType(0), partial_mcv, ); @@ -3266,7 +3269,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, @@ -3283,6 +3286,7 @@ fn genSetFrameTruncatedOverflowCompare( src_mcv: MCValue, overflow_cc: ?Condition, ) !void { + const mod = self.bin_file.options.module.?; const src_lock = switch (src_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -3290,22 +3294,12 @@ fn genSetFrameTruncatedOverflowCompare( defer if (src_lock) |lock| self.register_manager.unlockReg(lock); const ty = tuple_ty.structFieldType(0); - const int_info = ty.intInfo(self.target.*); + const int_info = ty.intInfo(mod); - var hi_limb_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (int_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = (int_info.bits - 1) % 64 + 1, - }; - const hi_limb_ty = Type.initPayload(&hi_limb_pl.base); + const hi_limb_bits = (int_info.bits - 1) % 64 + 1; + const hi_limb_ty = try mod.intType(int_info.signedness, hi_limb_bits); - var rest_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = int_info.bits - hi_limb_pl.data, - }; - const rest_ty = Type.initPayload(&rest_pl.base); + const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_limb_bits); const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp); const temp_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs); @@ -3335,7 +3329,7 @@ fn genSetFrameTruncatedOverflowCompare( ); } - const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)); + const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod)); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, @@ -3345,23 +3339,24 @@ fn genSetFrameTruncatedOverflowCompare( ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ 
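// The limb split above follows from 64-bit limbs: `(bits - 1) % 64 + 1` is
// the width of the topmost, possibly partial, limb, and everything below
// it becomes an unsigned "rest" type. Worked example for a u100 result:
//
//     hi_limb_bits = (100 - 1) % 64 + 1 = 36   // top limb compared as a 36-bit int
//     rest bits    = 100 - 36 = 64             // low limb handled as u64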
.eflags = .ne }, ); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const dst_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = switch (dst_ty.zigTypeTag()) { + const result: MCValue = switch (dst_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), .Int => result: { try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_info = dst_ty.intInfo(mod); const cc: Condition = switch (dst_info.signedness) { .unsigned => .c, .signed => .o, @@ -3369,11 +3364,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_active_bits = self.activeIntBits(bin_op.lhs); const rhs_active_bits = self.activeIntBits(bin_op.rhs); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, .data = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2) }; - const src_ty = Type.initPayload(&src_pl.base); + const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); + const src_ty = try mod.intType(dst_info.signedness, src_bits); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -3391,26 +3383,26 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } }; } else { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, else => { // For now, this is the only supported multiply that doesn't fit in a register. - assert(dst_info.bits <= 128 and src_pl.data == 64); + assert(dst_info.bits <= 128 and src_bits == 64); const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), tuple_ty.structFieldType(0), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), .{ .immediate = 0 }, // cc being set is impossible ); @@ -3433,7 +3425,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Clobbers .rax and .rdx registers. /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } @@ -3472,8 +3465,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Always returns a register. /// Clobbers .rax and .rdx registers. 
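// genIntMulDivOpMir above leans on the fixed x86 convention restated in
// its doc comment: div/idiv consume the dividend in rdx:rax and leave the
// quotient in rax and the remainder in rdx, which is why callers spill
// both registers first. genInlineIntDivFloor below then derives a floored
// quotient from the truncating one; the underlying identity (a sketch of
// the math, not the literal emitted sequence) is:
//
//     div_floor(a, b) = div_trunc(a, b) - [a % b != 0 and sign(a) != sign(b)]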
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - const int_info = ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); + const int_info = ty.intInfo(mod); const dividend: Register = switch (lhs) { .register => |reg| reg, else => try self.copyToTmpRegister(ty, lhs), @@ -3585,6 +3579,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); @@ -3592,7 +3587,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const opt_ty = src_ty.childType(); const src_mcv = try self.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { break :result if (self.liveness.isUnused(inst)) .unreach else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) @@ -3610,7 +3605,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); const pl_ty = dst_ty.childType(); - const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*)); + const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; }; @@ -3618,6 +3613,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_union_ty = self.air.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(); @@ -3629,11 +3625,11 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result operand; } - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const err_off = errUnionErrorOffset(payload_ty, mod); switch (operand) { .register => |reg| { // TODO reuse operand @@ -3678,12 +3674,13 @@ fn genUnwrapErrorUnionPayloadMir( err_union_ty: Type, err_union: MCValue, ) !MCValue { + const mod = self.bin_file.options.module.?; const payload_ty = err_union_ty.errorUnionPayload(); const result: MCValue = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); switch (err_union) { .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, @@ -3720,6 +3717,7 @@ fn genUnwrapErrorUnionPayloadMir( // *(E!T) -> E fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3739,8 +3737,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = 
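// Neither airUnwrapErrUnionErr above nor genUnwrapErrorUnionPayloadMir
// below hard-codes field positions: an error union is a two-field
// aggregate whose error/payload order depends on the payload's alignment,
// so every access goes through errUnionErrorOffset/errUnionPayloadOffset.
// Sketch of a frame-based payload access, mirroring the .load_frame case
// below:
//
//     const pl_off = errUnionPayloadOffset(payload_ty, mod);
//     // payload sits at frame_addr.off + pl_off in the same frame index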
eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); - const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), @@ -3755,6 +3753,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { // *(E!T) -> *T fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3777,8 +3776,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = eu_ty.errorUnionPayload(); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3789,6 +3788,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.air.typeOf(ty_op.operand); @@ -3803,8 +3803,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); - const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ @@ -3824,8 +3824,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3853,14 +3853,15 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const pl_ty = self.air.typeOf(ty_op.operand); - if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 }; + if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 }; const opt_ty = self.air.typeOfIndex(inst); const pl_mcv = try self.resolveInst(ty_op.operand); - const same_repr = opt_ty.optionalReprIsPayload(); + const same_repr = opt_ty.optionalReprIsPayload(mod); if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv; 
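// `optionalReprIsPayload` is what makes this wrap free: for optionals such
// as `?*T` the null state is encoded in the payload's own bits (a zero
// pointer), so `T` -> `?T` needs no extra storage and the operand can be
// reused directly. Optionals with a separate flag take the copy path below
// instead, storing a one-byte non-null marker just past the payload; a
// sketch of the frame variant, assuming `frame_index` holds the optional
// (airOptionalPayloadPtrSet above does the same through a register):
//
//     const flag_off = @intCast(i32, pl_ty.abiSize(mod));
//     try self.genSetMem(.{ .frame = frame_index }, flag_off, Type.bool, .{ .immediate = 1 });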
const pl_lock: ?RegisterLock = switch (pl_mcv) { @@ -3873,7 +3874,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { try self.genCopy(pl_ty, opt_mcv, pl_mcv); if (!same_repr) { - const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*)); + const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); switch (opt_mcv) { else => unreachable, @@ -3900,6 +3901,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); @@ -3908,11 +3910,11 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result .{ .immediate = 0 }; + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 }; - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3922,6 +3924,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); @@ -3929,11 +3932,11 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const err_ty = eu_ty.errorUnionSet(); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result try self.resolveInst(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); const operand = try self.resolveInst(ty_op.operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); @@ -3974,6 +3977,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3994,7 +3998,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const 
dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -4041,6 +4045,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi } fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { + const mod = self.bin_file.options.module.?; const slice_ty = self.air.typeOf(lhs); const slice_mcv = try self.resolveInst(lhs); const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) { @@ -4050,7 +4055,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock); const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -4097,6 +4102,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const array_ty = self.air.typeOf(bin_op.lhs); @@ -4108,7 +4114,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { defer if (array_lock) |lock| self.register_manager.unlockReg(lock); const elem_ty = array_ty.childType(); - const elem_abi_size = elem_ty.abiSize(self.target.*); + const elem_abi_size = elem_ty.abiSize(mod); const index_ty = self.air.typeOf(bin_op.rhs); const index = try self.resolveInst(bin_op.rhs); @@ -4125,7 +4131,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const addr_reg = try self.register_manager.allocReg(null, gp); switch (array) { .register => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array); try self.asmRegisterMemory( .{ ._, .lea }, @@ -4162,14 +4168,15 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); // this is identical to the `airPtrElemPtr` codegen except here an // additional `mov` is needed at the end to get the actual value - const elem_ty = ptr_ty.elemType2(); - const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_ty = ptr_ty.elemType2(mod); + const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); const index_ty = self.air.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_lock = switch (index_mcv) { @@ -4207,6 +4214,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4218,8 +4226,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { }; defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const elem_ty = ptr_ty.elemType2(); - const elem_abi_size = elem_ty.abiSize(self.target.*); + const elem_ty = ptr_ty.elemType2(mod); + const 
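// All three element accessors above reduce the address to
// `base + index * elem_abi_size`: elemOffset materializes the scaled index
// in a register, which then feeds a lea (pointer result) or a mov (loaded
// value). Worked example for a `[*]u32` at runtime index 3:
//
//     offset = 3 * @sizeOf(u32) = 12 bytes past the base pointer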
elem_abi_size = elem_ty.abiSize(mod); const index_ty = self.air.typeOf(extra.rhs); const index = try self.resolveInst(extra.rhs); const index_lock: ?RegisterLock = switch (index) { @@ -4239,11 +4247,12 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_union_ty = self.air.typeOf(bin_op.lhs); const union_ty = ptr_union_ty.childType(); const tag_ty = self.air.typeOf(bin_op.rhs); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -4284,11 +4293,12 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { } fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const tag_ty = self.air.typeOfIndex(inst); const union_ty = self.air.typeOf(ty_op.operand); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none }); @@ -4302,7 +4312,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const tag_abi_size = tag_ty.abiSize(self.target.*); + const tag_abi_size = tag_ty.abiSize(mod); const dst_mcv: MCValue = blk: { switch (operand) { .load_frame => |frame_addr| { @@ -4337,6 +4347,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { } fn airClz(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); @@ -4358,7 +4369,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const src_bits = src_ty.bitSize(self.target.*); + const src_bits = src_ty.bitSize(mod); if (self.hasFeature(.lzcnt)) { if (src_bits <= 8) { const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv); @@ -4405,7 +4416,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { } if (src_bits > 64) - return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); + return self.fail("TODO airClz of {}", .{src_ty.fmt(mod)}); if (math.isPowerOfTwo(src_bits)) { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits ^ (src_bits - 1), @@ -4422,7 +4433,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(imm_reg, cmov_abi_size), @@ -4449,7 +4460,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { .{ .register = wide_reg }, ); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( 
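// Without the lzcnt feature, airClz above synthesizes clz from bsr. bsr
// yields the index of the highest set bit and leaves its destination
// undefined for zero input, so for a power-of-two width the result is
//
//     clz(x) = (bits - 1) - bsr(x)
//
// and this CMOV installs a sentinel (`src_bits ^ (src_bits - 1)`, as loaded
// above) so the zero case still comes out as `bits` after the final
// correction. Worked example for u32: x = 0x100 => bsr = 8 => 31 - 8 = 23.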
registerAlias(imm_reg, cmov_abi_size), registerAlias(dst_reg, cmov_abi_size), @@ -4465,11 +4476,12 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { } fn airCtz(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); const src_ty = self.air.typeOf(ty_op.operand); - const src_bits = src_ty.bitSize(self.target.*); + const src_bits = src_ty.bitSize(mod); const src_mcv = try self.resolveInst(ty_op.operand); const mat_src_mcv = switch (src_mcv) { @@ -4548,7 +4560,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(width_reg, cmov_abi_size), @@ -4560,10 +4572,11 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { } fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); if (self.hasFeature(.popcnt)) { @@ -4729,6 +4742,7 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -4738,7 +4752,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { switch (self.regExtraBits(src_ty)) { 0 => {}, else => |extra| try self.genBinOpMir( - if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh }, + if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh }, src_ty, dst_mcv, .{ .immediate = extra }, @@ -4749,10 +4763,11 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { } fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false); @@ -4847,7 +4862,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { switch (self.regExtraBits(src_ty)) { 0 => {}, else => |extra| try self.genBinOpMir( - if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh }, + if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh }, src_ty, dst_mcv, .{ .immediate = extra }, @@ -4858,17 +4873,18 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const tag = self.air.instructions.items(.tag)[inst]; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - const abi_size: u32 = switch 
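// airFloatSign below implements neg/fabs with integer vector masks: it
// reinterprets the operand as a vector of signed ints of the same scalar
// width and uses that type's extreme values as the constant operand.
// minInt is the sign-bit-only pattern (xor flips the sign for neg) and
// maxInt is everything-but-the-sign (and clears it for fabs). At 32 bits:
//
//     minInt(i32) = 0x8000_0000  // neg:  x ^ mask
//     maxInt(i32) = 0x7FFF_FFFF  // fabs: x & mask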
(ty.abiSize(self.target.*)) { + const abi_size: u32 = switch (ty.abiSize(mod)) { 1...16 => 16, 17...32 => 32, else => return self.fail("TODO implement airFloatSign for {}", .{ ty.fmt(self.bin_file.options.module.?), }), }; - const scalar_bits = ty.scalarType().floatBits(self.target.*); + const scalar_bits = ty.scalarType(mod).floatBits(self.target.*); const src_mcv = try self.resolveInst(un_op); const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; @@ -4905,21 +4921,17 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { var stack align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - var int_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_signed }, - .data = scalar_bits, - }; var vec_pl = Type.Payload.Array{ .base = .{ .tag = .vector }, .data = .{ .len = @divExact(abi_size * 8, scalar_bits), - .elem_type = Type.initPayload(&int_pl.base), + .elem_type = try mod.intType(.signed, scalar_bits), }, }; const vec_ty = Type.initPayload(&vec_pl.base); const sign_val = switch (tag) { - .neg => try vec_ty.minInt(stack.get(), self.target.*), - .fabs => try vec_ty.maxInt(stack.get(), self.target.*), + .neg => try vec_ty.minInt(stack.get(), mod), + .fabs => try vec_ty.maxInt(stack.get(), mod), else => unreachable, }; @@ -5008,17 +5020,18 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void { } fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4) !void { + const mod = self.bin_file.options.module.?; if (!self.hasFeature(.sse4_1)) return self.fail("TODO implement genRound without sse4_1 feature", .{}); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, @@ -5041,7 +5054,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 })) |tag| tag else return self.fail("TODO implement genRound for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const dst_alias = registerAlias(dst_reg, abi_size); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( @@ -5078,9 +5091,10 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 } fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) @@ -5092,7 +5106,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const result: MCValue = result: { - const 
mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { const mat_src_reg = if (src_mcv.isRegister()) @@ -5114,7 +5128,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { 1 => { @@ -5186,7 +5200,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { }, else => unreachable, })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( @@ -5274,10 +5288,11 @@ fn reuseOperandAdvanced( } fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; const ptr_info = ptr_ty.ptrInfo().data; const val_ty = ptr_info.pointee_type; - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); const limb_abi_size: u32 = @min(val_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size); @@ -5382,20 +5397,21 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; try self.spillRegisters(&.{ .rdi, .rsi, .rcx }); const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx }); defer for (reg_locks) |lock| self.register_manager.unlockReg(lock); const ptr_ty = self.air.typeOf(ty_op.operand); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); - const elem_rc = regClassForType(elem_ty); - const ptr_rc = regClassForType(ptr_ty); + const elem_rc = regClassForType(elem_ty, mod); + const ptr_rc = regClassForType(ptr_ty, mod); const ptr_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and @@ -5416,13 +5432,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; const ptr_info = ptr_ty.ptrInfo().data; const src_ty = ptr_ty.childType(); const limb_abi_size: u16 = @min(ptr_info.host_size, 8); const limb_abi_bits = limb_abi_size * 8; - const src_bit_size = src_ty.bitSize(self.target.*); + const src_bit_size = src_ty.bitSize(mod); const src_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size); const src_bit_off = ptr_info.bit_offset % limb_abi_bits; @@ -5555,14 +5572,15 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { } fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { + const mod = self.bin_file.options.module.?; 
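// packedLoad/packedStore above locate a bit-packed value in two steps,
// first the limb that holds it, then the bit position inside that limb:
//
//     byte_off = bit_offset / limb_abi_bits * limb_abi_size
//     bit_off  = bit_offset % limb_abi_bits
//
// Worked example with 8-byte limbs and bit_offset = 70:
// byte_off = 70 / 64 * 8 = 8 and bit_off = 6, i.e. read the second limb
// and shift right by 6.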
const ptr_field_ty = self.air.typeOfIndex(inst); const ptr_container_ty = self.air.typeOf(operand); const container_ty = ptr_container_ty.childType(); const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { - .Auto, .Extern => container_ty.structFieldOffset(index, self.target.*), - .Packed => if (container_ty.zigTypeTag() == .Struct and + .Auto, .Extern => container_ty.structFieldOffset(index, mod), + .Packed => if (container_ty.zigTypeTag(mod) == .Struct and ptr_field_ty.ptrInfo().data.host_size == 0) - container_ty.packedStructFieldByteOffset(index, self.target.*) + container_ty.packedStructFieldByteOffset(index, mod) else 0, }); @@ -5577,6 +5595,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const result: MCValue = result: { @@ -5584,17 +5603,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const index = extra.field_index; const container_ty = self.air.typeOf(operand); - const container_rc = regClassForType(container_ty); + const container_rc = regClassForType(container_ty, mod); const field_ty = container_ty.structFieldType(index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; - const field_rc = regClassForType(field_ty); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + const field_rc = regClassForType(field_ty, mod); const field_is_gp = field_rc.supersetOf(gp); const src_mcv = try self.resolveInst(operand); const field_off = switch (container_ty.containerLayout()) { - .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*) * 8), + .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8), .Packed => if (container_ty.castTag(.@"struct")) |struct_obj| - struct_obj.data.packedFieldBitOffset(self.target.*, index) + struct_obj.data.packedFieldBitOffset(mod, index) else 0, }; @@ -5611,7 +5630,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*)); + const field_abi_size = @intCast(u32, field_ty.abiSize(mod)); const limb_abi_size: u32 = @min(field_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size); @@ -5733,12 +5752,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const inst_ty = self.air.typeOfIndex(inst); const parent_ty = inst_ty.childType(); - const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, self.target.*)); + const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); const src_mcv = try self.resolveInst(extra.field_ptr); const dst_mcv = if (src_mcv.isRegisterOffset() and @@ -5751,9 +5771,10 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { } fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue { + const mod = self.bin_file.options.module.?; const src_ty = self.air.typeOf(src_air); const 
src_mcv = try self.resolveInst(src_air); - if (src_ty.zigTypeTag() == .Vector) { + if (src_ty.zigTypeTag(mod) == .Vector) { return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); } @@ -5786,28 +5807,22 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { - const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(self.target.*), 8)); + const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8)); const int_info = if (src_ty.tag() == .bool) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else - src_ty.intInfo(self.target.*); + src_ty.intInfo(mod); var byte_off: i32 = 0; while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) { - var limb_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (int_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)), - }; - const limb_ty = Type.initPayload(&limb_pl.base); + const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)); + const limb_ty = try mod.intType(int_info.signedness, limb_bits); const limb_mcv = switch (byte_off) { 0 => dst_mcv, else => dst_mcv.address().offset(byte_off).deref(), }; - if (limb_pl.base.tag == .int_unsigned and self.regExtraBits(limb_ty) > 0) { - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_pl.data); + if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) { + const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits); try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask }); } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv); } @@ -5819,7 +5834,8 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: } fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void { - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, dst_ty.abiSize(mod)); if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(self.bin_file.options.module.?), @@ -5866,6 +5882,7 @@ fn genShiftBinOpMir( lhs_mcv: MCValue, shift_mcv: MCValue, ) !void { + const mod = self.bin_file.options.module.?; const rhs_mcv: MCValue = rhs: { switch (shift_mcv) { .immediate => |imm| switch (imm) { @@ -5880,7 +5897,7 @@ fn genShiftBinOpMir( break :rhs .{ .register = .rcx }; }; - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size <= 8) { switch (lhs_mcv) { .register => |lhs_reg| switch (rhs_mcv) { @@ -6099,13 +6116,14 @@ fn genShiftBinOp( lhs_ty: Type, rhs_ty: Type, ) !MCValue { - if (lhs_ty.zigTypeTag() == .Vector) { + const mod = self.bin_file.options.module.?; + if (lhs_ty.zigTypeTag(mod) == .Vector) { return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); } - assert(rhs_ty.abiSize(self.target.*) == 1); + assert(rhs_ty.abiSize(mod) == 1); - const lhs_abi_size = lhs_ty.abiSize(self.target.*); + const lhs_abi_size = lhs_ty.abiSize(mod); if (lhs_abi_size > 16) { return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); } @@ -6136,7 +6154,7 @@ fn genShiftBinOp( break :dst dst_mcv; }; - const signedness = lhs_ty.intInfo(self.target.*).signedness; + const signedness = lhs_ty.intInfo(mod).signedness; try self.genShiftBinOpMir(switch (air_tag) { .shl, 
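// The `not` lowering in genUnOp above works limb by limb; for an unsigned
// limb narrower than its register it xors with a mask of exactly the live
// bits instead of emitting a full `not`, keeping the unused high register
// bits zero:
//
//     mask = maxInt(u64) >> (64 - limb_bits)
//
// Worked example for a 20-bit top limb: mask = 0xFFFFF, so the emitted op
// is `xor dst, 0xFFFFF` rather than `not dst`.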
.shl_exact => switch (signedness) { .signed => .{ ._l, .sa }, @@ -6163,11 +6181,12 @@ fn genMulDivBinOp( lhs: MCValue, rhs: MCValue, ) !MCValue { - if (dst_ty.zigTypeTag() == .Vector or dst_ty.zigTypeTag() == .Float) { + const mod = self.bin_file.options.module.?; + if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) { return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()}); } - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); if (switch (tag) { else => unreachable, .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2, @@ -6184,7 +6203,7 @@ fn genMulDivBinOp( const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock); - const signedness = ty.intInfo(self.target.*).signedness; + const signedness = ty.intInfo(mod).signedness; switch (tag) { .mul, .mulwrap, @@ -6338,13 +6357,14 @@ fn genBinOp( lhs_air: Air.Inst.Ref, rhs_air: Air.Inst.Ref, ) !MCValue { + const mod = self.bin_file.options.module.?; const lhs_ty = self.air.typeOf(lhs_air); const rhs_ty = self.air.typeOf(rhs_air); - const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, lhs_ty.abiSize(mod)); const maybe_mask_reg = switch (air_tag) { else => null, - .max, .min => if (lhs_ty.scalarType().isRuntimeFloat()) registerAlias( + .max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias( if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: { try self.register_manager.getReg(.xmm0, null); break :mask .xmm0; @@ -6384,7 +6404,7 @@ fn genBinOp( else => false, }; - const vec_op = switch (lhs_ty.zigTypeTag()) { + const vec_op = switch (lhs_ty.zigTypeTag(mod)) { else => false, .Float, .Vector => true, }; @@ -6456,7 +6476,7 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - const elem_size = lhs_ty.elemType2().abiSize(self.target.*); + const elem_size = lhs_ty.elemType2(mod).abiSize(mod); try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size }); try self.genBinOpMir( switch (air_tag) { @@ -6506,7 +6526,7 @@ fn genBinOp( try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); const cc: Condition = switch (int_info.signedness) { .unsigned => switch (air_tag) { .min => .a, @@ -6520,7 +6540,7 @@ fn genBinOp( }, }; - const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2); const tmp_reg = switch (dst_mcv) { .register => |reg| reg, else => try self.copyToTmpRegister(lhs_ty, dst_mcv), @@ -6581,7 +6601,7 @@ fn genBinOp( } const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { else => unreachable, .Float => switch (lhs_ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { @@ -6657,9 +6677,9 @@ fn genBinOp( 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch 
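// Two fixed-register conventions drive the spills above: variable x86
// shifts only accept their count in cl, so genShiftBinOpMir routes any
// non-immediate amount through .rcx, and mul/div implicitly use rax/rdx,
// so genMulDivBinOp reserves both up front. A hedged sketch of the
// shift-count half, mirroring `break :rhs .{ .register = .rcx }` above:
//
//     try self.register_manager.getReg(.rcx, null); // claim cl for the count
//     try self.genSetReg(.rcx, rhs_ty, shift_mcv);  // then shift dst by cl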
(lhs_ty.childType().zigTypeTag(mod)) { else => null, - .Int => switch (lhs_ty.childType().intInfo(self.target.*).bits) { + .Int => switch (lhs_ty.childType().intInfo(mod).bits) { 8 => switch (lhs_ty.vectorLen()) { 1...16 => switch (air_tag) { .add, @@ -6671,7 +6691,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .mins } else if (self.hasFeature(.sse4_1)) @@ -6685,7 +6705,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6711,11 +6731,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null, }, @@ -6737,7 +6757,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .mins } else @@ -6747,7 +6767,7 @@ fn genBinOp( else .{ .p_w, .minu }, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .maxs } else @@ -6772,11 +6792,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null, }, @@ -6803,7 +6823,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if 
(self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .mins } else if (self.hasFeature(.sse4_1)) @@ -6817,7 +6837,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6846,11 +6866,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null, }, @@ -7206,14 +7226,14 @@ fn genBinOp( const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size); try self.asmRegisterRegisterRegisterImmediate( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .cmp }, 64 => .{ .v_sd, .cmp }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1 => .{ .v_ss, .cmp }, @@ -7240,14 +7260,14 @@ fn genBinOp( Immediate.u(3), // unord ); try self.asmRegisterRegisterRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ .v_ps, .blendv }, 64 => .{ .v_pd, .blendv }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...8 => .{ .v_ps, .blendv }, @@ -7274,14 +7294,14 @@ fn genBinOp( } else { const has_blend = self.hasFeature(.sse4_1); try self.asmRegisterRegisterImmediate( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ss, .cmp }, 64 => .{ ._sd, .cmp }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1 => .{ ._ss, .cmp }, @@ -7307,14 +7327,14 @@ fn genBinOp( Immediate.u(if (has_blend) 3 else 7), // unord, ord ); if (has_blend) try self.asmRegisterRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch 
(lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .blendv }, 64 => .{ ._pd, .blendv }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .blendv }, @@ -7338,14 +7358,14 @@ fn genBinOp( mask_reg, ) else { try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"and" }, 64 => .{ ._pd, .@"and" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .@"and" }, @@ -7368,14 +7388,14 @@ fn genBinOp( mask_reg, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .andn }, 64 => .{ ._pd, .andn }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .andn }, @@ -7398,14 +7418,14 @@ fn genBinOp( lhs_copy_reg.?, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"or" }, 64 => .{ ._pd, .@"or" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .@"or" }, @@ -7442,7 +7462,8 @@ fn genBinOpMir( dst_mcv: MCValue, src_mcv: MCValue, ) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -7640,7 +7661,7 @@ fn genBinOpMir( defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); const ty_signedness = - if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned; + if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned; const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) { .signed => Type.usize, .unsigned => Type.isize, @@ -7796,7 +7817,8 @@ fn genBinOpMir( /// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. /// Does not support byte-size operands. 
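// ("Multi-operand" means the two- and three-operand imul forms, `imul r,
// r/m` and `imul r, r/m, imm`; x86 only encodes those for 16-, 32- and
// 64-bit operands, which is where the byte-size restriction in the doc
// comment above comes from.)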
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, dst_ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -8022,6 +8044,7 @@ fn airFence(self: *Self, inst: Air.Inst.Index) !void { } fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + const mod = self.bin_file.options.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{}); const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; @@ -8029,7 +8052,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const ty = self.air.typeOf(callee); - const fn_ty = switch (ty.zigTypeTag()) { + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -8077,7 +8100,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .none, .unreach => null, .indirect => |reg_off| lock: { const ret_ty = fn_ty.fnReturnType(); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod)); try self.genSetReg(reg_off.reg, Type.usize, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, }); @@ -8100,8 +8123,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - const mod = self.bin_file.options.module.?; - if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (if (func_value.castTag(.function)) |func_payload| func_payload.data.owner_decl else if (func_value.castTag(.decl_ref)) |decl_ref_payload| @@ -8178,7 +8200,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(.rax, Type.usize, mcv); try self.asmRegister(.{ ._, .call }, .rax); @@ -8234,6 +8256,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { } fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -8255,9 +8278,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const result = MCValue{ - .eflags = switch (ty.zigTypeTag()) { + .eflags = switch (ty.zigTypeTag(mod)) { else => result: { - const abi_size = @intCast(u16, ty.abiSize(self.target.*)); + const abi_size = @intCast(u16, ty.abiSize(mod)); const may_flip: enum { may_flip, must_flip, @@ -8290,7 +8313,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (src_lock) |lock| self.register_manager.unlockReg(lock); break :result Condition.fromCompareOperator( - if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned, + if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned, 
result_op: { const flipped_op = if (flipped) op.reverse() else op; if (abi_size > 8) switch (flipped_op) { @@ -8404,7 +8427,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg); try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv); } else return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), 32 => try self.genBinOpMir( .{ ._ss, .ucomi }, @@ -8419,7 +8442,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { src_mcv, ), else => return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), } @@ -8454,7 +8477,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { self.eflags_inst = inst; const op_ty = self.air.typeOf(un_op); - const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*)); + const op_abi_size = @intCast(u32, op_ty.abiSize(mod)); const op_mcv = try self.resolveInst(un_op); const dst_reg = switch (op_mcv) { .register => |reg| reg, @@ -8573,7 +8596,8 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { } fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .eflags => |cc| { // Here we map the opposites since the jump is to the false branch. @@ -8646,6 +8670,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { } fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; switch (opt_mcv) { .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() }, else => {}, @@ -8658,10 +8683,10 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const pl_ty = opt_ty.optionalChild(&pl_buf); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload()) + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool }; + .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; switch (opt_mcv) { .none, @@ -8681,14 +8706,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register => |opt_reg| { if (some_info.off == 0) { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); const alias_reg = registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } assert(some_info.ty.tag() == .bool); - const opt_abi_size = @intCast(u32, opt_ty.abiSize(self.target.*)); + const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod)); try self.asmRegisterImmediate( .{ ._, .bt }, registerAlias(opt_reg, opt_abi_size), @@ -8707,7 +8732,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, opt_mcv.address()); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, 
some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8720,7 +8745,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC }, .indirect, .load_frame => { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { @@ -8742,6 +8767,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC } fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; try self.spillEflagsIfOccupied(); self.eflags_inst = inst; @@ -8750,10 +8776,10 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const pl_ty = opt_ty.optionalChild(&pl_buf); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload()) + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool }; + .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; const ptr_reg = switch (ptr_mcv) { .register => |reg| reg, @@ -8762,7 +8788,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8775,6 +8801,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) } fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; const err_type = ty.errorUnionSet(); if (err_type.errorSetIsEmpty()) { @@ -8786,7 +8813,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! 
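// A sketch of the layout rule isNull/isNullPtr depend on above: when
// the optional is represented by its payload (pointer-like optionals),
// the payload itself is tested at offset 0; otherwise a one-byte bool
// flag sits immediately after the payload, at the payload's ABI size.
// The names below are illustrative, not the compiler's.
const SomeInfo = struct { off: i32, flag_is_bool: bool };

fn someInfo(repr_is_payload: bool, payload_abi_size: i32) SomeInfo {
    if (repr_is_payload) return .{ .off = 0, .flag_is_bool = false };
    return .{ .off = payload_abi_size, .flag_is_bool = true };
}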
self.eflags_inst = inst; } - const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*); + const err_off = errUnionErrorOffset(ty.errorUnionPayload(), mod); switch (operand) { .register => |reg| { const eu_lock = self.register_manager.lockReg(reg); @@ -9088,12 +9115,13 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void { } fn airBr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const br = self.air.instructions.items(.data)[inst].br; const src_mcv = try self.resolveInst(br.operand); const block_ty = self.air.typeOfIndex(br.block_inst); const block_unused = - !block_ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(br.block_inst); + !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; const block_data = self.blocks.getPtr(br.block_inst).?; const first_br = block_data.relocs.items.len == 0; @@ -9402,7 +9430,8 @@ const MoveStrategy = union(enum) { }; }; fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { - switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (ty.zigTypeTag(mod)) { else => return .{ .move = .{ ._, .mov } }, .Float => switch (ty.floatBits(self.target.*)) { 16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ @@ -9419,8 +9448,8 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, else => {}, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Int => switch (ty.childType().intInfo(self.target.*).bits) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { + .Int => switch (ty.childType().intInfo(mod).bits) { 8 => switch (ty.vectorLen()) { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ .insert = .{ .vp_b, .insr }, @@ -9647,7 +9676,8 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError } fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size * 8 > dst_reg.bitSize()) return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { @@ -9730,7 +9760,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .{ .register = try self.copyToTmpRegister(ty, src_mcv) }, ), .sse => try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType().zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) { else => switch (abi_size) { 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9738,7 +9768,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr 17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null, else => null, }, - .Float => switch (ty.scalarType().floatBits(self.target.*)) { + .Float => switch (ty.scalarType(mod).floatBits(self.target.*)) { 16, 128 => switch (abi_size) { 2...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9789,7 +9819,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .indirect => try self.moveStrategy(ty, false), 
.load_frame => |frame_addr| try self.moveStrategy( ty, - self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*), + self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod), ), .lea_frame => .{ .move = .{ ._, .lea } }, else => unreachable, @@ -9821,7 +9851,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr switch (try self.moveStrategy(ty, mem.isAlignedGeneric( u32, @bitCast(u32, small_addr), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ))) { .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), .insert_extract => |ie| try self.asmRegisterMemoryImmediate( @@ -9839,7 +9869,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ), } }, - .load_direct => |sym_index| switch (ty.zigTypeTag()) { + .load_direct => |sym_index| switch (ty.zigTypeTag(mod)) { else => { const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ @@ -9933,7 +9963,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr } fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); const dst_ptr_mcv: MCValue = switch (base) { .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, @@ -9945,7 +9976,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal try self.genInlineMemset(dst_ptr_mcv, .{ .immediate = 0xaa }, .{ .immediate = abi_size }), .immediate => |imm| switch (abi_size) { 1, 2, 4 => { - const immediate = if (ty.isSignedInt()) + const immediate = if (ty.isSignedInt(mod)) Immediate.s(@truncate(i32, @bitCast(i64, imm))) else Immediate.u(@intCast(u32, imm)); @@ -9967,7 +9998,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), - if (ty.isSignedInt()) + if (ty.isSignedInt(mod)) Immediate.s(@truncate( i32, @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63), @@ -9991,19 +10022,19 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .none => mem.isAlignedGeneric( u32, @bitCast(u32, disp), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ), .reg => |reg| switch (reg) { .es, .cs, .ss, .ds => mem.isAlignedGeneric( u32, @bitCast(u32, disp), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ), else => false, }, .frame => |frame_index| self.getFrameAddrAlignment( .{ .index = frame_index, .off = disp }, - ) >= ty.abiAlignment(self.target.*), + ) >= ty.abiAlignment(mod), })) { .move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias), .insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate( @@ -10017,13 +10048,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .register_overflow => |ro| { try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(0, self.target.*)), + disp + @intCast(i32, ty.structFieldOffset(0, mod)), ty.structFieldType(0), .{ .register = ro.reg }, ); try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(1, self.target.*)), + disp + @intCast(i32, ty.structFieldOffset(1, mod)), ty.structFieldType(1), .{ .eflags = 
ro.eflags }, ); @@ -10146,13 +10177,14 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.air.typeOfIndex(inst); const src_ty = self.air.typeOf(ty_op.operand); const result = result: { - const dst_rc = regClassForType(dst_ty); - const src_rc = regClassForType(src_ty); + const dst_rc = regClassForType(dst_ty, mod); + const src_rc = regClassForType(src_ty, mod); const src_mcv = try self.resolveInst(ty_op.operand); const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; @@ -10172,13 +10204,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; const dst_signedness = - if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const src_signedness = - if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; + if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; if (dst_signedness == src_signedness) break :result dst_mcv; - const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*)); - const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*)); + const abi_size = @intCast(u16, dst_ty.abiSize(mod)); + const bit_size = @intCast(u16, dst_ty.bitSize(mod)); if (abi_size * 8 <= bit_size) break :result dst_mcv; const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable; @@ -10192,14 +10224,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const high_lock = self.register_manager.lockReg(high_reg); defer if (high_lock) |lock| self.register_manager.unlockReg(lock); - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (dst_signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = bit_size % 64, - }; - const high_ty = Type.initPayload(&high_pl.base); + const high_ty = try mod.intType(dst_signedness, bit_size % 64); try self.truncateRegister(high_ty, high_reg); if (!dst_mcv.isRegister()) try self.genCopy( @@ -10213,6 +10238,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ty = self.air.typeOfIndex(inst); @@ -10221,11 +10247,11 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(); const array_len = array_ty.arrayLen(); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(self.target.*)), + @intCast(i32, ptr_ty.abiSize(mod)), Type.usize, .{ .immediate = array_len }, ); @@ -10235,12 +10261,13 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); - const src_bits = @intCast(u32, src_ty.bitSize(self.target.*)); + const src_bits = @intCast(u32, src_ty.bitSize(mod)); const src_signedness = - if (src_ty.isAbiInt()) 
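// The bitcast path above only has to re-truncate the topmost limb,
// whose width is `bit_size % 64`; that is the integer type handed to
// mod.intType and then to truncateRegister. A sketch of the mask this
// amounts to, with invented names and the two-argument @intCast of
// this Zig version:
const std = @import("std");

fn truncateHighLimb(limb: u64, total_bits: u16) u64 {
    const rem = @intCast(u6, total_bits % 64);
    if (rem == 0) return limb; // the top limb is already a full 64 bits
    return limb & ((@as(u64, 1) << rem) - 1);
}

test "only bit_size % 64 bits survive in the top limb" {
    // a 66-bit integer keeps 2 significant bits in its second limb
    try std.testing.expectEqual(@as(u64, 0x3), truncateHighLimb(0xFF, 66));
}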
src_ty.intInfo(self.target.*).signedness else .unsigned; + if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; const dst_ty = self.air.typeOfIndex(inst); const src_size = math.divCeil(u32, @max(switch (src_signedness) { @@ -10248,7 +10275,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { .unsigned => src_bits + 1, }, 32), 8) catch unreachable; if (src_size > 8) return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(mod), dst_ty.fmt(mod), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -10261,12 +10288,12 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg); - const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) { .Float => switch (dst_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 }, @@ -10275,7 +10302,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { }, else => null, })) |tag| tag else return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(mod), dst_ty.fmt(mod), }); const dst_alias = dst_reg.to128(); const src_alias = registerAlias(src_reg, src_size); @@ -10288,13 +10315,14 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); const dst_ty = self.air.typeOfIndex(inst); - const dst_bits = @intCast(u32, dst_ty.bitSize(self.target.*)); + const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); const dst_signedness = - if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const dst_size = math.divCeil(u32, @max(switch (dst_signedness) { .signed => dst_bits, @@ -10312,13 +10340,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) { .Float => switch (src_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si }, 64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, 
.cvttsd2si }, @@ -10339,12 +10367,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr_ty = self.air.typeOf(extra.ptr); const val_ty = self.air.typeOf(extra.expected_value); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); @@ -10433,6 +10462,7 @@ fn atomicOp( rmw_op: ?std.builtin.AtomicRmwOp, order: std.builtin.AtomicOrder, ) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -10445,7 +10475,7 @@ fn atomicOp( }; defer if (val_lock) |lock| self.register_manager.unlockReg(lock); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), @@ -10539,8 +10569,8 @@ fn atomicOp( .Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv), .Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv), .Min, .Max => { - const cc: Condition = switch (if (val_ty.isAbiInt()) - val_ty.intInfo(self.target.*).signedness + const cc: Condition = switch (if (val_ty.isAbiInt(mod)) + val_ty.intInfo(mod).signedness else .unsigned) { .unsigned => switch (op) { @@ -10728,6 +10758,7 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr } fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { + const mod = self.bin_file.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -10752,7 +10783,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { }; defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); - const elem_abi_size = @intCast(u31, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); if (elem_abi_size == 1) { const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) { @@ -10897,8 +10928,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { // We need a properly aligned and sized call frame to be able to call this function. 
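// .Min and .Max above have no single-instruction x86 RMW form, so the
// emitted loop compares, selects via cmov using the signedness-derived
// condition, and retries with lock cmpxchg. The same shape in plain
// Zig, sketched for u32 only:
fn atomicMin(ptr: *u32, operand: u32) u32 {
    var old = @atomicLoad(u32, ptr, .Monotonic);
    while (true) {
        const new = @min(old, operand); // the cmov step
        // null means the compare-exchange stored `new`; otherwise
        // retry with the freshly observed value.
        old = @cmpxchgWeak(u32, ptr, old, new, .Monotonic, .Monotonic) orelse return old;
    }
}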
{ const needed_call_frame = FrameAlloc.init(.{ - .size = inst_ty.abiSize(self.target.*), - .alignment = inst_ty.abiAlignment(self.target.*), + .size = inst_ty.abiSize(mod), + .alignment = inst_ty.abiAlignment(mod), }); const frame_allocs_slice = self.frame_allocs.slice(); const stack_frame_size = @@ -11013,14 +11044,15 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { } fn airSplat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const vector_ty = self.air.typeOfIndex(inst); - const dst_rc = regClassForType(vector_ty); - const scalar_ty = vector_ty.scalarType(); + const dst_rc = regClassForType(vector_ty, mod); + const scalar_ty = vector_ty.scalarType(mod); const src_mcv = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - switch (scalar_ty.zigTypeTag()) { + switch (scalar_ty.zigTypeTag(mod)) { else => {}, .Float => switch (scalar_ty.floatBits(self.target.*)) { 32 => switch (vector_ty.vectorLen()) { @@ -11233,36 +11265,37 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const result_ty = self.air.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Struct => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); if (result_ty.containerLayout() == .Packed) { const struct_obj = result_ty.castTag(.@"struct").?.data; try self.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, - .{ .immediate = result_ty.abiSize(self.target.*) }, + .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); - const elem_bit_size = @intCast(u32, elem_ty.bitSize(self.target.*)); + const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); if (elem_bit_size > 64) { return self.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); const elem_abi_bits = elem_abi_size * 8; - const elem_off = struct_obj.packedFieldBitOffset(self.target.*, elem_i); + const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i); const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); @@ -11322,10 +11355,10 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) 
|elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); - const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, self.target.*)); + const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -11337,9 +11370,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { }, .Array => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); const elem_ty = result_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); for (elements, 0..) |elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -11374,11 +11407,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { const union_ty = self.air.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); const src_ty = self.air.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); @@ -11400,7 +11434,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const tag_val = Value.initPayload(&tag_pl.base); var tag_int_pl: Value.Payload.U64 = undefined; const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl); - const tag_int = tag_int_val.toUnsignedInt(self.target.*); + const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) @intCast(i32, layout.payload_size) else @@ -11424,6 +11458,7 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { } fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const ty = self.air.typeOfIndex(inst); @@ -11466,14 +11501,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mir_tag = if (@as( ?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd132 }, 64 => .{ .v_sd, .fmadd132 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd132 }, @@ -11493,14 +11528,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd213 }, 64 => .{ .v_sd, .fmadd213 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch 
(ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd213 }, @@ -11520,14 +11555,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd231 }, 64 => .{ .v_sd, .fmadd231 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd231 }, @@ -11555,7 +11590,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -11573,10 +11608,11 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const ty = self.air.typeOf(ref); // If the type has no codegen bits, no need to store it. - if (!ty.hasRuntimeBitsIgnoreComptime()) return .none; + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (Air.refToIndex(ref)) |inst| { const mcv = switch (self.air.instructions.items(.tag)[inst]) { @@ -11584,7 +11620,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref).?, + .val = self.air.value(ref, mod).?, })); break :tracking gop.value_ptr; }, @@ -11597,7 +11633,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref).? }); + return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref, mod).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { @@ -11670,6 +11706,7 @@ fn resolveCallingConventionValues( var_args: []const Air.Inst.Ref, stack_frame_base: FrameIndex, ) !CallMCValues { + const mod = self.bin_file.options.module.?; const cc = fn_ty.fnCallingConvention(); const param_len = fn_ty.fnParamLen(); const param_types = try self.gpa.alloc(Type, param_len + var_args.len); @@ -11702,21 +11739,21 @@ fn resolveCallingConventionValues( switch (self.target.os.tag) { .windows => { // Align the stack to 16bytes before allocating shadow stack space (if any). - result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(self.target.*)); + result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod)); }, else => {}, } // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { // TODO: is this even possible for C calling convention? 
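// The three FMA encodings chosen above differ only in which operand is
// overwritten: with dst = a, src2 = b, src3 = c, fmadd132 computes
// a = a*c + b, fmadd213 computes a = b*a + c, and fmadd231 computes
// a = b*c + a. That is why the operand order alone picks the tag; all
// three reduce to a single fused @mulAdd:
const std = @import("std");

test "fmadd132/213/231 operand shapes" {
    const a: f64 = 2.0;
    const b: f64 = 3.0;
    const c: f64 = 4.0;
    try std.testing.expectEqual(@as(f64, 11.0), @mulAdd(f64, a, c, b)); // 132
    try std.testing.expectEqual(@as(f64, 10.0), @mulAdd(f64, b, a, c)); // 213
    try std.testing.expectEqual(@as(f64, 14.0), @mulAdd(f64, b, c, a)); // 231
}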
result.return_value = InstTracking.init(.none); } else { const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ret_ty, self.target.*, .ret), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11725,7 +11762,7 @@ fn resolveCallingConventionValues( result.return_value = switch (classes[0]) { .integer => InstTracking.init(.{ .register = registerAlias( ret_reg, - @intCast(u32, ret_ty.abiSize(self.target.*)), + @intCast(u32, ret_ty.abiSize(mod)), ) }), .float, .sse => InstTracking.init(.{ .register = .xmm0 }), .memory => ret: { @@ -11744,11 +11781,11 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - assert(ty.hasRuntimeBitsIgnoreComptime()); + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ty, mod, .arg), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11783,8 +11820,8 @@ fn resolveCallingConventionValues( }), } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = @intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11798,13 +11835,13 @@ fn resolveCallingConventionValues( result.stack_align = 16; // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @intCast(u31, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod)); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; @@ -11819,12 +11856,12 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { arg.* = .none; continue; } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = @intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11908,9 +11945,10 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { /// Truncates the value in the register in place. /// Clobbers any remaining bits. 
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { - const int_info = if (ty.isAbiInt()) ty.intInfo(self.target.*) else std.builtin.Type.Int{ + const mod = self.bin_file.options.module.?; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(self.target.*)), + .bits = @intCast(u16, ty.bitSize(mod)), }; const max_reg_bit_width = Register.rax.bitSize(); switch (int_info.signedness) { @@ -11953,8 +11991,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { } fn regBitSize(self: *Self, ty: Type) u64 { - const abi_size = ty.abiSize(self.target.*); - return switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); + return switch (ty.zigTypeTag(mod)) { else => switch (abi_size) { 1 => 8, 2 => 16, @@ -11971,7 +12010,8 @@ fn regBitSize(self: *Self, ty: Type) u64 { } fn regExtraBits(self: *Self, ty: Type) u64 { - return self.regBitSize(ty) - ty.bitSize(self.target.*); + const mod = self.bin_file.options.module.?; + return self.regBitSize(ty) - ty.bitSize(mod); } fn hasFeature(self: *Self, feature: Target.x86.Feature) bool { diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index e79424d6d872..c8d20c73fa25 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -1,10 +1,3 @@ -const std = @import("std"); -const Type = @import("../../type.zig").Type; -const Target = std.Target; -const assert = std.debug.assert; -const Register = @import("bits.zig").Register; -const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; - pub const Class = enum { integer, sse, @@ -19,7 +12,7 @@ pub const Class = enum { float_combine, }; -pub fn classifyWindows(ty: Type, target: Target) Class { +pub fn classifyWindows(ty: Type, mod: *const Module) Class { // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017 // "There's a strict one-to-one correspondence between a function call's arguments // and the registers used for those arguments. Any argument that doesn't fit in 8 @@ -28,7 +21,7 @@ pub fn classifyWindows(ty: Type, target: Target) Class { // "All floating point operations are done using the 16 XMM registers." // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed // as if they were integers of the same size." - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Pointer, .Int, .Bool, @@ -43,10 +36,10 @@ pub fn classifyWindows(ty: Type, target: Target) Class { .ErrorUnion, .AnyFrame, .Frame, - => switch (ty.abiSize(target)) { + => switch (ty.abiSize(mod)) { 0 => unreachable, 1, 2, 4, 8 => return .integer, - else => switch (ty.zigTypeTag()) { + else => switch (ty.zigTypeTag(mod)) { .Int => return .win_i128, .Struct, .Union => if (ty.containerLayout() == .Packed) { return .win_i128; @@ -75,13 +68,14 @@ pub const Context = enum { ret, arg, other }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. 
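// truncateRegister above clears (or sign-fills) every bit past
// int_info.bits, and regExtraBits reports how many such bits a
// register alias carries. For the unsigned case this reduces to the
// usual low-bits mask; a sketch assuming a 64-bit register and 1..63
// significant bits:
fn lowBitsMask(bits: u6) u64 {
    return (@as(u64, 1) << bits) - 1; // e.g. bits == 5 -> 0b1_1111
}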
-pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { +pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class { + const target = mod.getTarget(); const memory_class = [_]Class{ .memory, .none, .none, .none, .none, .none, .none, .none, }; var result = [1]Class{.none} ** 8; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Pointer => switch (ty.ptrSize()) { .Slice => { result[0] = .integer; @@ -94,7 +88,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, }, .Int, .Enum, .ErrorSet => { - const bits = ty.intInfo(target).bits; + const bits = ty.intInfo(mod).bits; if (bits <= 64) { result[0] = .integer; return result; @@ -165,7 +159,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, .Vector => { const elem_ty = ty.childType(); - const bits = elem_ty.bitSize(target) * ty.arrayLen(); + const bits = elem_ty.bitSize(mod) * ty.arrayLen(); if (bits <= 64) return .{ .sse, .none, .none, .none, .none, .none, .none, .none, @@ -204,7 +198,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return memory_class; }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { result[0] = .integer; return result; } @@ -215,7 +209,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty.containerLayout() == .Packed) { assert(ty_size <= 128); result[0] = .integer; @@ -230,12 +224,12 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { const fields = ty.structFields(); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } - const field_size = field.ty.abiSize(target); - const field_class_array = classifySystemV(field.ty, target, .other); + const field_size = field.ty.abiSize(mod); + const field_class_array = classifySystemV(field.ty, mod, .other); const field_class = std.mem.sliceTo(&field_class_array, .none); if (byte_i + field_size <= 8) { // Combine this field with the previous one. @@ -334,7 +328,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty.containerLayout() == .Packed) { assert(ty_size <= 128); result[0] = .integer; @@ -347,12 +341,12 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { const fields = ty.unionFields(); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } // Combine this field with the previous one. - const field_class = classifySystemV(field.ty, target, .other); + const field_class = classifySystemV(field.ty, mod, .other); for (&result, 0..) |*result_item, i| { const field_item = field_class[i]; // "If both classes are equal, this is the resulting class." 
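// A heavily reduced sketch of the System V x86-64 classification that
// the hunks above port from Target to Module: zero-bit types classify
// as none, small scalars ride in integer or SSE registers, and
// oversized values degrade to memory. This deliberately omits x87
// (f80), the per-eightbyte merging of struct and union fields, and
// the ctx distinctions handled by the real classifier.
const Class = enum { integer, sse, memory, none };

fn classifyScalar(is_float: bool, bit_size: u64) Class {
    if (bit_size == 0) return .none;
    if (is_float) return if (bit_size <= 128) .sse else .memory;
    return if (bit_size <= 64) .integer else .memory;
}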
@@ -426,7 +420,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return result; }, .Array => { - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty_size <= 64) { result[0] = .integer; return result; @@ -527,10 +521,17 @@ pub const RegisterClass = struct { }; }; +const builtin = @import("builtin"); +const std = @import("std"); +const Target = std.Target; +const assert = std.debug.assert; const testing = std.testing; + const Module = @import("../../Module.zig"); +const Register = @import("bits.zig").Register; +const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; +const Type = @import("../../type.zig").Type; const Value = @import("../../value.zig").Value; -const builtin = @import("builtin"); fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { return .{ @@ -541,34 +542,3 @@ fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { .is_comptime = false, }; } - -test "C_C_D" { - var fields = Module.Struct.Fields{}; - // const C_C_D = extern struct { v1: i8, v2: i8, v3: f64 }; - try fields.ensureTotalCapacity(testing.allocator, 3); - defer fields.deinit(testing.allocator); - fields.putAssumeCapacity("v1", _field(.i8, 0)); - fields.putAssumeCapacity("v2", _field(.i8, 1)); - fields.putAssumeCapacity("v3", _field(.f64, 4)); - - var C_C_D_struct = Module.Struct{ - .fields = fields, - .namespace = undefined, - .owner_decl = undefined, - .zir_index = undefined, - .layout = .Extern, - .status = .fully_resolved, - .known_non_opv = true, - .is_tuple = false, - }; - var C_C_D = Type.Payload.Struct{ .data = &C_C_D_struct }; - - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .ret), - ); - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .arg), - ); -} diff --git a/src/codegen.zig b/src/codegen.zig index adce183833d2..6846bebe6b17 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -154,7 +154,7 @@ pub fn generateLazySymbol( } mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); return Result.ok; - } else if (lazy_sym.ty.zigTypeTag() == .Enum) { + } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; for (lazy_sym.ty.enumFields().keys()) |tag_name| { try code.ensureUnusedCapacity(tag_name.len + 1); @@ -186,22 +186,22 @@ pub fn generateSymbol( typed_value.val = rt.data; } - const target = bin_file.options.target; + const mod = bin_file.options.module.?; + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - const mod = bin_file.options.module.?; log.debug("generateSymbol: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), }); if (typed_value.val.isUndefDeep()) { - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); return Result.ok; } - switch (typed_value.ty.zigTypeTag()) { + switch (typed_value.ty.zigTypeTag(mod)) { .Fn => { return Result{ .fail = try ErrorMsg.create( @@ -219,7 +219,7 @@ pub fn generateSymbol( 64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)), 80 => { writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try 
code.addManyAsArray(10)); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0, abi_size - 10); }, 128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)), @@ -242,7 +242,7 @@ pub fn generateSymbol( try code.ensureUnusedCapacity(bytes.len + 1); code.appendSliceAssumeCapacity(bytes); if (typed_value.ty.sentinel()) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(target)); + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); code.appendAssumeCapacity(byte); } return Result.ok; @@ -330,11 +330,11 @@ pub fn generateSymbol( .zero, .one, .int_u64, .int_big_positive => { switch (target.ptrBitWidth()) { 32 => { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); }, 64 => { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); }, else => unreachable, @@ -399,19 +399,19 @@ pub fn generateSymbol( }, }, .Int => { - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= 8) { const x: u8 = switch (info.signedness) { - .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)), - .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))), + .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(mod)), + .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(mod))), }; try code.append(x); return Result.ok; } if (info.bits > 64) { var bigint_buffer: Value.BigIntSpace = undefined; - const bigint = typed_value.val.toBigInt(&bigint_buffer, target); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const bigint = typed_value.val.toBigInt(&bigint_buffer, mod); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; const start = code.items.len; try code.resize(start + abi_size); bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); @@ -420,25 +420,25 @@ pub fn generateSymbol( switch (info.signedness) { .unsigned => { if (info.bits <= 16) { - const x = @intCast(u16, typed_value.val.toUnsignedInt(target)); + const x = @intCast(u16, typed_value.val.toUnsignedInt(mod)); mem.writeInt(u16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(u32, typed_value.val.toUnsignedInt(target)); + const x = @intCast(u32, typed_value.val.toUnsignedInt(mod)); mem.writeInt(u32, try code.addManyAsArray(4), x, endian); } else { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); } }, .signed => { if (info.bits <= 16) { - const x = @intCast(i16, typed_value.val.toSignedInt(target)); + const x = @intCast(i16, typed_value.val.toSignedInt(mod)); mem.writeInt(i16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(i32, typed_value.val.toSignedInt(target)); + const x = @intCast(i32, typed_value.val.toSignedInt(mod)); mem.writeInt(i32, try code.addManyAsArray(4), x, endian); } else { - const x = typed_value.val.toSignedInt(target); + const x = typed_value.val.toSignedInt(mod); mem.writeInt(i64, try code.addManyAsArray(8), x, 
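// generateSymbol above serializes integer constants at their ABI size
// in target byte order with the same std.mem.writeInt calls visible in
// the hunk; anything wider than 64 bits goes through the big integer's
// writeTwosComplement. A self-contained sketch of the <= 64-bit
// little-endian path:
const std = @import("std");

test "two's complement bytes, little endian" {
    var buf: [4]u8 = undefined;
    std.mem.writeInt(i32, &buf, -2, .Little);
    try std.testing.expectEqual([4]u8{ 0xFE, 0xFF, 0xFF, 0xFF }, buf);
}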
endian); } }, @@ -449,9 +449,9 @@ pub fn generateSymbol( var int_buffer: Value.Payload.U64 = undefined; const int_val = typed_value.enumToInt(&int_buffer); - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= 8) { - const x = @intCast(u8, int_val.toUnsignedInt(target)); + const x = @intCast(u8, int_val.toUnsignedInt(mod)); try code.append(x); return Result.ok; } @@ -468,25 +468,25 @@ pub fn generateSymbol( switch (info.signedness) { .unsigned => { if (info.bits <= 16) { - const x = @intCast(u16, int_val.toUnsignedInt(target)); + const x = @intCast(u16, int_val.toUnsignedInt(mod)); mem.writeInt(u16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(u32, int_val.toUnsignedInt(target)); + const x = @intCast(u32, int_val.toUnsignedInt(mod)); mem.writeInt(u32, try code.addManyAsArray(4), x, endian); } else { - const x = int_val.toUnsignedInt(target); + const x = int_val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); } }, .signed => { if (info.bits <= 16) { - const x = @intCast(i16, int_val.toSignedInt(target)); + const x = @intCast(i16, int_val.toSignedInt(mod)); mem.writeInt(i16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(i32, int_val.toSignedInt(target)); + const x = @intCast(i32, int_val.toSignedInt(mod)); mem.writeInt(i32, try code.addManyAsArray(4), x, endian); } else { - const x = int_val.toSignedInt(target); + const x = int_val.toSignedInt(mod); mem.writeInt(i64, try code.addManyAsArray(8), x, endian); } }, @@ -503,7 +503,7 @@ pub fn generateSymbol( const struct_obj = typed_value.ty.castTag(.@"struct").?.data; const fields = struct_obj.fields.values(); const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; const current_pos = code.items.len; try code.resize(current_pos + abi_size); var bits: u16 = 0; @@ -512,8 +512,8 @@ pub fn generateSymbol( const field_ty = fields[index].ty; // pointer may point to a decl which must be marked used // but can also result in a relocation. Therefore we handle those seperately. - if (field_ty.zigTypeTag() == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(target)) orelse return error.Overflow; + if (field_ty.zigTypeTag(mod) == .Pointer) { + const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); defer tmp_list.deinit(); switch (try generateSymbol(bin_file, src_loc, .{ @@ -526,7 +526,7 @@ pub fn generateSymbol( } else { field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; } - bits += @intCast(u16, field_ty.bitSize(target)); + bits += @intCast(u16, field_ty.bitSize(mod)); } return Result.ok; @@ -536,7 +536,7 @@ pub fn generateSymbol( const field_vals = typed_value.val.castTag(.aggregate).?.data; for (field_vals, 0..) 
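// The packed-struct branch above writes each runtime field at a
// running bit offset and advances the cursor by that field's bit
// size, the same accounting packedFieldBitOffset performs. The cursor
// logic in isolation, with made-up field widths:
const std = @import("std");

test "packed fields advance a bit cursor" {
    const field_bits = [_]u16{ 3, 9, 20 };
    var offsets: [3]u16 = undefined;
    var bits: u16 = 0;
    for (field_bits, 0..) |size, i| {
        offsets[i] = bits;
        bits += size;
    }
    try std.testing.expectEqual([3]u16{ 0, 3, 12 }, offsets);
}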
|field_val, index| { const field_ty = typed_value.ty.structFieldType(index); - if (!field_ty.hasRuntimeBits()) continue; + if (!field_ty.hasRuntimeBits(mod)) continue; switch (try generateSymbol(bin_file, src_loc, .{ .ty = field_ty, @@ -548,7 +548,7 @@ pub fn generateSymbol( const unpadded_field_end = code.items.len - struct_begin; // Pad struct members if required - const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target); + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; if (padding > 0) { @@ -560,7 +560,7 @@ pub fn generateSymbol( }, .Union => { const union_obj = typed_value.val.castTag(.@"union").?.data; - const layout = typed_value.ty.unionGetLayout(target); + const layout = typed_value.ty.unionGetLayout(mod); if (layout.payload_size == 0) { return generateSymbol(bin_file, src_loc, .{ @@ -584,7 +584,7 @@ pub fn generateSymbol( const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; assert(union_ty.haveFieldTypes()); const field_ty = union_ty.fields.values()[field_index].ty; - if (!field_ty.hasRuntimeBits()) { + if (!field_ty.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); } else { switch (try generateSymbol(bin_file, src_loc, .{ @@ -595,7 +595,7 @@ pub fn generateSymbol( .fail => |em| return Result{ .fail = em }, } - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(target)) orelse return error.Overflow; + const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } @@ -620,15 +620,15 @@ pub fn generateSymbol( .Optional => { var opt_buf: Type.Payload.ElemType = undefined; const payload_type = typed_value.ty.optionalChild(&opt_buf); - const is_pl = !typed_value.val.isNull(); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const is_pl = !typed_value.val.isNull(mod); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - if (!payload_type.hasRuntimeBits()) { + if (!payload_type.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); return Result.ok; } - if (typed_value.ty.optionalReprIsPayload()) { + if (typed_value.ty.optionalReprIsPayload(mod)) { if (typed_value.val.castTag(.opt_payload)) |payload| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, @@ -637,7 +637,7 @@ pub fn generateSymbol( .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else if (!typed_value.val.isNull()) { + } else if (!typed_value.val.isNull(mod)) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, .val = typed_value.val, @@ -652,7 +652,7 @@ pub fn generateSymbol( return Result.ok; } - const padding = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow) - 1; + const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef); switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, @@ -671,7 +671,7 @@ pub fn generateSymbol( const payload_ty = typed_value.ty.errorUnionPayload(); const is_payload = typed_value.val.errorUnionIsPayload(); - if 
(!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, @@ -679,9 +679,9 @@ pub fn generateSymbol( }, code, debug_output, reloc_info); } - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - const abi_align = typed_value.ty.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_align = typed_value.ty.abiAlignment(mod); // error value first when its type is larger than the error union's payload if (error_align > payload_align) { @@ -743,7 +743,7 @@ pub fn generateSymbol( try code.writer().writeInt(u32, kv.value, endian); }, else => { - try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target))); + try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(mod))); }, } return Result.ok; @@ -752,7 +752,7 @@ pub fn generateSymbol( .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - len) orelse + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse return error.Overflow; try code.ensureUnusedCapacity(len + padding); code.appendSliceAssumeCapacity(bytes[0..len]); @@ -763,8 +763,8 @@ pub fn generateSymbol( const elem_vals = typed_value.val.castTag(.aggregate).?.data; const elem_ty = typed_value.ty.elemType(); const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - - (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) { + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, else => |e| return e, })) orelse return error.Overflow; @@ -784,8 +784,8 @@ pub fn generateSymbol( const array = typed_value.val.castTag(.repeated).?.data; const elem_ty = typed_value.ty.childType(); const len = typed_value.ty.arrayLen(); - const padding = math.cast(usize, typed_value.ty.abiSize(target) - - (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) { + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, else => |e| return e, })) orelse return error.Overflow; @@ -805,7 +805,7 @@ pub fn generateSymbol( .str_lit => { const str_lit = typed_value.val.castTag(.str_lit).?.data; const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - str_lit.len) orelse + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse return error.Overflow; try code.ensureUnusedCapacity(str_lit.len + padding); code.appendSliceAssumeCapacity(bytes); @@ -832,7 +832,7 @@ fn lowerParentPtr( debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { - const target = bin_file.options.target; + const mod = bin_file.options.module.?; switch (parent_ptr.tag()) { .field_ptr => { const field_ptr = parent_ptr.castTag(.field_ptr).?.data; @@ -843,19 +843,19 @@ fn lowerParentPtr( 
field_ptr.container_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag()) { + reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { .Pointer => offset: { assert(field_ptr.container_ty.isSlice()); var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(target), + 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(mod), else => unreachable, }; }, .Struct, .Union => field_ptr.container_ty.structFieldOffset( field_ptr.field_index, - target, + mod, ), else => return Result{ .fail = try ErrorMsg.create( bin_file.allocator, @@ -875,7 +875,7 @@ fn lowerParentPtr( elem_ptr.array_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))), + reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(mod))), ); }, .opt_payload_ptr => { @@ -900,7 +900,7 @@ fn lowerParentPtr( eu_payload_ptr.container_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, target))), + reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, mod))), ); }, .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( @@ -945,7 +945,7 @@ fn lowerDeclRef( reloc_info: RelocInfo, ) CodeGenError!Result { const target = bin_file.options.target; - const module = bin_file.options.module.?; + const mod = bin_file.options.module.?; if (typed_value.ty.isSlice()) { // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -961,7 +961,7 @@ fn lowerDeclRef( // generate length var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = typed_value.val.sliceLen(module), + .data = typed_value.val.sliceLen(mod), }; switch (try generateSymbol(bin_file, src_loc, .{ .ty = Type.usize, @@ -975,14 +975,14 @@ fn lowerDeclRef( } const ptr_width = target.ptrBitWidth(); - const decl = module.declPtr(decl_index); - const is_fn_body = decl.ty.zigTypeTag() == .Fn; - if (!is_fn_body and !decl.ty.hasRuntimeBits()) { + const decl = mod.declPtr(decl_index); + const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; + if (!is_fn_body and !decl.ty.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8)); return Result.ok; } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); const vaddr = try bin_file.getDeclVAddr(decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, @@ -1059,16 +1059,16 @@ fn genDeclRef( tv: TypedValue, decl_index: Module.Decl.Index, ) CodeGenError!GenResult { - const module = bin_file.options.module.?; - log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) }); + const mod = bin_file.options.module.?; + log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(mod), tv.val.fmtValue(tv.ty, mod) }); const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); - if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { const imm: u64 = switch (ptr_bytes) { 1 => 0xaa, 2 => 0xaaaa, @@ -1080,20 +1080,20 @@ fn genDeclRef( } // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? 
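// [editor's note] A minimal sketch of the transformation running through these
// hunks: layout queries that took a bare `std.Target` now take the Module so
// the InternPool is reachable, and the target, where still needed, is derived
// via `mod.getTarget()` as the surrounding diff does. The helper `ptrBytesFor`
// is hypothetical, for illustration only:
//
//     // before
//     fn ptrBytesFor(target: std.Target) u64 {
//         return @divExact(target.ptrBitWidth(), 8);
//     }
//
//     // after: thread `mod` through; derive the target on demand
//     fn ptrBytesFor(mod: *const Module) u64 {
//         const target = mod.getTarget();
//         return @divExact(target.ptrBitWidth(), 8);
//     }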
- if (tv.ty.castPtrToFn()) |fn_ty| { + if (tv.ty.castPtrToFn(mod)) |fn_ty| { if (fn_ty.fnInfo().is_generic) { - return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(target) }); + return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) }); } - } else if (tv.ty.zigTypeTag() == .Pointer) { - const elem_ty = tv.ty.elemType2(); - if (!elem_ty.hasRuntimeBits()) { - return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(target) }); + } else if (tv.ty.zigTypeTag(mod) == .Pointer) { + const elem_ty = tv.ty.elemType2(mod); + if (!elem_ty.hasRuntimeBits(mod)) { + return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) }); } } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); - const is_threadlocal = tv.val.isPtrToThreadLocal(module) and !bin_file.options.single_threaded; + const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded; if (bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index); @@ -1186,7 +1186,7 @@ pub fn genTypedValue( } } - switch (typed_value.ty.zigTypeTag()) { + switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => {}, @@ -1196,18 +1196,18 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = 0 }); }, .int_u64 => { - return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) }); + return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, } }, }, .Int => { - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= ptr_bits) { const unsigned = switch (info.signedness) { - .signed => @bitCast(u64, typed_value.val.toSignedInt(target)), - .unsigned => typed_value.val.toUnsignedInt(target), + .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)), + .unsigned => typed_value.val.toUnsignedInt(mod), }; return GenResult.mcv(.{ .immediate = unsigned }); } @@ -1216,7 +1216,7 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) }); }, .Optional => { - if (typed_value.ty.isPtrLikeOptional()) { + if (typed_value.ty.isPtrLikeOptional(mod)) { if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); var buf: Type.Payload.ElemType = undefined; @@ -1224,8 +1224,8 @@ pub fn genTypedValue( .ty = typed_value.ty.optionalChild(&buf), .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, }, owner_decl_index); - } else if (typed_value.ty.abiSize(target) == 1) { - return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) }); + } else if (typed_value.ty.abiSize(mod) == 1) { + return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) }); } }, .Enum => { @@ -1241,9 +1241,8 @@ pub fn genTypedValue( typed_value.ty.cast(Type.Payload.EnumFull).?.data.values; if (enum_values.count() != 0) { const tag_val = enum_values.keys()[field_index.data]; - var buf: Type.Payload.Bits = undefined; return genTypedValue(bin_file, src_loc, .{ - .ty = typed_value.ty.intTagType(&buf), + .ty = typed_value.ty.intTagType(), .val = tag_val, }, owner_decl_index); } else { @@ -1253,8 +1252,7 @@ pub fn genTypedValue( else => unreachable, } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer); + const int_tag_ty = typed_value.ty.intTagType(); return genTypedValue(bin_file, src_loc, .{ .ty = int_tag_ty, .val = typed_value.val, @@ 
-1281,7 +1279,7 @@ pub fn genTypedValue( const payload_type = typed_value.ty.errorUnionPayload(); const is_pl = typed_value.val.errorUnionIsPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); return genTypedValue(bin_file, src_loc, .{ @@ -1306,23 +1304,23 @@ pub fn genTypedValue( return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index); } -pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime()) { +pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return 0; } else { - return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align); + return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); } } -pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime()) { - return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), error_align); +pub fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); } else { return 0; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 86b74b14294a..da040a6fbbcd 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -16,6 +16,7 @@ const trace = @import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const InternPool = @import("../InternPool.zig"); const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; @@ -285,10 +286,11 @@ pub const Function = struct { const gop = try f.value_map.getOrPut(inst); if (gop.found_existing) return gop.value_ptr.*; - const val = f.air.value(ref).?; + const mod = f.object.dg.module; + const val = f.air.value(ref, mod).?; const ty = f.air.typeOf(ref); - const result: CValue = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: { + const result: CValue = if (lowersToArray(ty, mod)) result: { const writer = f.object.code_header.writer(); const alignment = 0; const decl_c_value = try f.allocLocalValue(ty, alignment); @@ -318,11 +320,11 @@ pub const Function = struct { /// those which go into `allocs`. This function does not add the resulting local into `allocs`; /// that responsibility lies with the caller. 
fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue { + const mod = f.object.dg.module; const gpa = f.object.dg.gpa; - const target = f.object.dg.module.getTarget(); try f.locals.append(gpa, .{ .cty_idx = try f.typeToIndex(ty, .complete), - .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), }); return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; } @@ -336,10 +338,10 @@ pub const Function = struct { /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should /// not be used for persistent locals (i.e. those in `allocs`). fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; if (f.free_locals_map.getPtr(.{ .cty_idx = try f.typeToIndex(ty, .complete), - .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), })) |locals_list| { if (locals_list.popOrNull()) |local_entry| { return .{ .new_local = local_entry.key }; @@ -352,8 +354,9 @@ pub const Function = struct { fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; return f.object.dg.renderValue(w, ty, val, location); }, .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location), @@ -364,8 +367,9 @@ pub const Function = struct { fn writeCValueDeref(f: *Function, w: anytype, c_value: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); return w.writeByte(')'); @@ -377,8 +381,9 @@ pub const Function = struct { fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); return f.writeCValue(w, member, .Other); @@ -390,8 +395,9 @@ pub const Function = struct { fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); try w.writeAll(")->"); @@ -522,11 +528,12 @@ pub const DeclGen = struct { decl_index: Decl.Index, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const decl = dg.module.declPtr(decl_index); + const mod = dg.module; + const decl = mod.declPtr(decl_index); assert(decl.has_tv); // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. 
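// [editor's note] In the `.constant` branches above, `f.air.value(inst)` became
// `f.air.value(inst, mod)`. Hedged reading, consistent with the rest of this
// patch: resolving a Ref to a comptime-known Value may now require the Module,
// presumably so interned values can be looked up. The sketch below shows only
// the new call shape, not a real implementation:
//
//     const mod = f.object.dg.module;
//     if (f.air.value(some_ref, mod)) |val| {
//         // render `val`; rendering, too, now threads `mod` through
//     }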
- if (ty.isPtrAtRuntime() and !decl.ty.isFnOrHasRuntimeBits()) { + if (ty.isPtrAtRuntime(mod) and !decl.ty.isFnOrHasRuntimeBits(mod)) { return dg.writeCValue(writer, .{ .undef = ty }); } @@ -553,7 +560,7 @@ pub const DeclGen = struct { var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = val.sliceLen(dg.module), + .data = val.sliceLen(mod), }; const len_val = Value.initPayload(&len_pl.base); @@ -568,7 +575,7 @@ pub const DeclGen = struct { // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug // somewhere and we should let the C compiler tell us about it. - const need_typecast = if (ty.castPtrToFn()) |_| false else !ty.eql(decl.ty, dg.module); + const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.eql(decl.ty, mod); if (need_typecast) { try writer.writeAll("(("); try dg.renderType(writer, ty); @@ -584,6 +591,8 @@ pub const DeclGen = struct { // // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { + const mod = dg.module; + if (!ptr_ty.isSlice()) { try writer.writeByte('('); try dg.renderType(writer, ptr_ty); @@ -601,7 +610,6 @@ pub const DeclGen = struct { try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); }, .field_ptr => { - const target = dg.module.getTarget(); const field_ptr = ptr_val.castTag(.field_ptr).?.data; // Ensure complete type definition is visible before accessing fields. @@ -615,7 +623,7 @@ pub const DeclGen = struct { field_ptr.container_ty, ptr_ty, @intCast(u32, field_ptr.field_index), - target, + mod, )) { .begin => try dg.renderParentPtr( writer, @@ -714,19 +722,20 @@ pub const DeclGen = struct { if (val.castTag(.runtime_value)) |rt| { val = rt.data; } - const target = dg.module.getTarget(); + const mod = dg.module; + const target = mod.getTarget(); const initializer_type: ValueRenderLocation = switch (location) { .StaticInitializer => .StaticInitializer, else => .Initializer, }; - const safety_on = switch (dg.module.optimizeMode()) { + const safety_on = switch (mod.optimizeMode()) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; if (val.isUndefDeep()) { - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Bool => { if (safety_on) { return writer.writeAll("0xaa"); @@ -737,8 +746,8 @@ pub const DeclGen = struct { .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}), .Float => { const bits = ty.floatBits(target); - var repr_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits }; - const repr_ty = Type.initPayload(&repr_pl.base); + // All unsigned ints matching float types are pre-allocated. 
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -778,11 +787,11 @@ pub const DeclGen = struct { var opt_buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&opt_buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, Type.bool, val, location); } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return dg.renderValue(writer, payload_ty, val, location); } @@ -811,7 +820,7 @@ pub const DeclGen = struct { for (0..ty.structFieldCount()) |field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBits()) continue; + if (!field_ty.hasRuntimeBits(mod)) continue; if (!empty) try writer.writeByte(','); try dg.renderValue(writer, field_ty, val, initializer_type); @@ -832,17 +841,17 @@ pub const DeclGen = struct { try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, val, initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, val, initializer_type); break; } @@ -853,7 +862,7 @@ pub const DeclGen = struct { const payload_ty = ty.errorUnionPayload(); const error_ty = ty.errorUnionSet(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, val, location); } @@ -916,7 +925,7 @@ pub const DeclGen = struct { } unreachable; } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => switch (val.tag()) { .field_ptr, .elem_ptr, @@ -931,8 +940,8 @@ pub const DeclGen = struct { const bits = ty.floatBits(target); const f128_val = val.toFloat(f128); - var repr_ty_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits }; - const repr_ty = Type.initPayload(&repr_ty_pl.base); + // All unsigned ints matching float types are pre-allocated. 
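// [editor's note] The recurring rewrite in this file, side by side: scratch
// `Type.Payload.Bits` buffers are replaced by types interned through the
// Module. Quoted from the hunks in this file; the `catch unreachable` is sound
// here because a uN type is pre-allocated for every float bit width:
//
//     // before: type materialized in a stack buffer
//     var repr_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits };
//     const repr_ty = Type.initPayload(&repr_pl.base);
//
//     // after: type interned through the Module
//     const repr_ty = mod.intType(.unsigned, bits) catch unreachable;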
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; assert(bits <= 128); var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; @@ -1109,7 +1118,7 @@ pub const DeclGen = struct { }, else => unreachable, }; - const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(target)) else null; + const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; try writer.print("{s}", .{ fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel), }); @@ -1131,11 +1140,11 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(target)); + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); @@ -1145,7 +1154,7 @@ pub const DeclGen = struct { while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try writer.print("'\\x{x}'", .{elem_val_u8}); } if (ai.sentinel) |s| { @@ -1183,10 +1192,10 @@ pub const DeclGen = struct { const payload_ty = ty.optionalChild(&opt_buf); const is_null_val = Value.makeBool(val.tag() == .null_value); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return dg.renderValue(writer, Type.bool, is_null_val, location); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val; return dg.renderValue(writer, payload_ty, payload_val, location); } @@ -1218,7 +1227,7 @@ pub const DeclGen = struct { const error_ty = ty.errorUnionSet(); const error_val = if (val.errorUnionIsPayload()) Value.zero else val; - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, error_val, location); } @@ -1263,8 +1272,7 @@ pub const DeclGen = struct { } }, else => { - var int_tag_ty_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_ty_buffer); + const int_tag_ty = ty.intTagType(); return dg.renderValue(writer, int_tag_ty, val, location); }, } @@ -1295,7 +1303,7 @@ pub const DeclGen = struct { for (field_vals, 0..) 
|field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeByte(','); try dg.renderValue(writer, field_ty, field_val, initializer_type); @@ -1306,13 +1314,10 @@ pub const DeclGen = struct { }, .Packed => { const field_vals = val.castTag(.aggregate).?.data; - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); @@ -1321,7 +1326,7 @@ pub const DeclGen = struct { for (0..field_vals.len) |field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; eff_num_fields += 1; } @@ -1330,7 +1335,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderValue(writer, ty, Value.undef, initializer_type); try writer.writeByte(')'); - } else if (ty.bitSize(target) > 64) { + } else if (ty.bitSize(mod) > 64) { // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) var num_or = eff_num_fields - 1; while (num_or > 0) : (num_or -= 1) { @@ -1344,7 +1349,7 @@ pub const DeclGen = struct { for (field_vals, 0..) |field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; if (bit_offset_val_pl.data != 0) { @@ -1362,7 +1367,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset_val_pl.data += field_ty.bitSize(target); + bit_offset_val_pl.data += field_ty.bitSize(mod); needs_closing_paren = true; eff_index += 1; } @@ -1373,7 +1378,7 @@ pub const DeclGen = struct { for (field_vals, 0..) 
|field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(" | "); try writer.writeByte('('); @@ -1388,7 +1393,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, field_ty, field_val, .Other); } - bit_offset_val_pl.data += field_ty.bitSize(target); + bit_offset_val_pl.data += field_ty.bitSize(mod); empty = false; } try writer.writeByte(')'); @@ -1408,12 +1413,12 @@ pub const DeclGen = struct { const field_ty = ty.unionFields().values()[field_i].ty; const field_name = ty.unionFields().keys()[field_i]; if (ty.containerLayout() == .Packed) { - if (field_ty.hasRuntimeBits()) { - if (field_ty.isPtrAtRuntime()) { + if (field_ty.hasRuntimeBits(mod)) { + if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); - } else if (field_ty.zigTypeTag() == .Float) { + } else if (field_ty.zigTypeTag(mod) == .Float) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); @@ -1427,21 +1432,21 @@ pub const DeclGen = struct { try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } - if (field_ty.hasRuntimeBits()) { + if (field_ty.hasRuntimeBits(mod)) { try writer.print(" .{ } = ", .{fmtIdent(field_name)}); try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); try writer.writeByte(' '); } else for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, Value.undef, initializer_type); break; } @@ -1478,9 +1483,9 @@ pub const DeclGen = struct { }, ) !void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; - const fn_decl = module.declPtr(fn_decl_index); + const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); const fn_info = fn_decl.ty.fnInfo(); @@ -1498,7 +1503,7 @@ pub const DeclGen = struct { const trailing = try renderTypePrefix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1525,7 +1530,7 @@ pub const DeclGen = struct { try renderTypeSuffix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1577,9 +1582,9 @@ pub const DeclGen = struct { fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; - _ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); - try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); + const mod = dg.module; + _ = try renderTypePrefix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1619,18 +1624,18 @@ pub const DeclGen = struct { /// | > 64 bit integer | < 64 bit integer | zig_make_(0, src) /// | > 64 bit 
integer | > 64 bit integer | zig_make_(zig_hi_(src), zig_lo_(src)) fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void { - const target = dg.module.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const dest_int_info = dest_ty.intInfo(target); + const mod = dg.module; + const dest_bits = dest_ty.bitSize(mod); + const dest_int_info = dest_ty.intInfo(mod); - const src_is_ptr = src_ty.isPtrAtRuntime(); + const src_is_ptr = src_ty.isPtrAtRuntime(mod); const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) { .unsigned => Type.usize, .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(target); - const src_int_info = if (src_eff_ty.isAbiInt()) src_eff_ty.intInfo(target) else null; + const src_bits = src_eff_ty.bitSize(mod); + const src_int_info = if (src_eff_ty.isAbiInt(mod)) src_eff_ty.intInfo(mod) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or (toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or @@ -1703,8 +1708,8 @@ pub const DeclGen = struct { alignment: u32, kind: CType.Kind, ) error{ OutOfMemory, AnalysisFail }!void { - const target = dg.module.getTarget(); - const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)); + const mod = dg.module; + const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)); try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas); } @@ -1717,7 +1722,7 @@ pub const DeclGen = struct { alignas: CType.AlignAs, ) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; switch (std.math.order(alignas.@"align", alignas.abi)) { .lt => try w.print("zig_under_align({}) ", .{alignas.getAlign()}), @@ -1726,22 +1731,23 @@ pub const DeclGen = struct { } const trailing = - try renderTypePrefix(dg.decl_index, store.*, module, w, cty_idx, .suffix, qualifiers); + try renderTypePrefix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, qualifiers); try w.print("{}", .{trailing}); try dg.writeCValue(w, name); - try renderTypeSuffix(dg.decl_index, store.*, module, w, cty_idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, .{}); } fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { + const mod = dg.module; switch (tv.val.tag()) { .extern_fn => return true, .function => { const func = tv.val.castTag(.function).?.data; - return dg.module.decl_exports.contains(func.owner_decl); + return mod.decl_exports.contains(func.owner_decl); }, .variable => { const variable = tv.val.castTag(.variable).?.data; - return dg.module.decl_exports.contains(variable.owner_decl); + return mod.decl_exports.contains(variable.owner_decl); }, else => unreachable, } @@ -1838,10 +1844,11 @@ pub const DeclGen = struct { } fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void { - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); - if (dg.module.decl_exports.get(decl_index)) |exports| { + if (mod.decl_exports.get(decl_index)) |exports| { try writer.writeAll(exports.items[export_index].options.name); } else if (decl.isExtern()) { try writer.writeAll(mem.span(decl.name)); @@ -1850,7 +1857,7 @@ pub const DeclGen = struct { // expand to 3x the length of its input, but let's cut it off at a much shorter 
limit. var name: [100]u8 = undefined; var name_stream = std.io.fixedBufferStream(&name); - decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) { + decl.renderFullyQualifiedName(mod, name_stream.writer()) catch |err| switch (err) { error.NoSpaceLeft => {}, }; try writer.print("{}__{d}", .{ @@ -1894,10 +1901,10 @@ pub const DeclGen = struct { .bits => {}, } - const target = dg.module.getTarget(); - const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{ + const mod = dg.module; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(target)), + .bits = @intCast(u16, ty.bitSize(mod)), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); @@ -1916,6 +1923,7 @@ pub const DeclGen = struct { val: Value, loc: ValueRenderLocation, ) !std.fmt.Formatter(formatIntLiteral) { + const mod = dg.module; const kind: CType.Kind = switch (loc) { .FunctionArgument => .parameter, .Initializer, .Other => .complete, @@ -1923,7 +1931,7 @@ pub const DeclGen = struct { }; return std.fmt.Formatter(formatIntLiteral){ .data = .{ .dg = dg, - .int_info = ty.intInfo(dg.module.getTarget()), + .int_info = ty.intInfo(mod), .kind = kind, .cty = try dg.typeToCType(ty, kind), .val = val, @@ -2646,11 +2654,12 @@ pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); + const mod = o.dg.module; const decl = o.dg.decl.?; const decl_c_value = .{ .decl = o.dg.decl_index.unwrap().? }; const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; - if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return; + if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; if (tv.val.tag() == .extern_fn) { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); @@ -2704,8 +2713,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { .val = dg.decl.?.val, }; const writer = dg.fwd_decl.writer(); + const mod = dg.module; - switch (tv.ty.zigTypeTag()) { + switch (tv.ty.zigTypeTag(mod)) { .Fn => { const is_global = dg.declIsGlobal(tv); if (is_global) { @@ -2791,6 +2801,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con } fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { + const mod = f.object.dg.module; const air_tags = f.air.instructions.items(.tag); for (body) |inst| { @@ -2826,10 +2837,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none), .rem => blk: { const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(); + const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(mod); // For binary operations @TypeOf(lhs)==@TypeOf(rhs), // so we only check one. 
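// [editor's note] Quick reference for the call-shape changes in this file; each
// pair is taken directly from the hunks above (`mod` is `f.object.dg.module`):
//
//     ty.zigTypeTag(mod)                    // was ty.zigTypeTag()
//     ty.intInfo(mod)                       // was ty.intInfo(target)
//     ty.abiAlignment(mod)                  // was ty.abiAlignment(target)
//     ty.scalarType(mod)                    // was ty.scalarType()
//     ty.hasRuntimeBitsIgnoreComptime(mod)  // was ty.hasRuntimeBitsIgnoreComptime()
//     f.air.value(ref, mod)                 // was f.air.value(ref)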
- break :blk if (lhs_scalar_ty.isInt()) + break :blk if (lhs_scalar_ty.isInt(mod)) try airBinOp(f, inst, "%", "rem", .none) else try airBinFloatOp(f, inst, "fmod"); @@ -3095,9 +3106,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ } fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3120,13 +3132,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); const ptr_ty = f.air.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const ptr = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3155,9 +3168,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3180,13 +3194,14 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); const slice_ty = f.air.typeOf(bin_op.lhs); - const elem_ty = slice_ty.elemType2(); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const elem_ty = slice_ty.elemType2(mod); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const slice = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3209,9 +3224,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3234,14 +3250,14 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const elem_type = inst_ty.elemType(); - if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty }; + if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; - const target = f.object.dg.module.getTarget(); const local = try f.allocLocalValue( elem_type, - inst_ty.ptrAlignment(target), + inst_ty.ptrAlignment(mod), ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local 
}); const gpa = f.object.dg.module.gpa; @@ -3250,14 +3266,14 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const elem_ty = inst_ty.elemType(); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; - const target = f.object.dg.module.getTarget(); const local = try f.allocLocalValue( elem_ty, - inst_ty.ptrAlignment(target), + inst_ty.ptrAlignment(mod), ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; @@ -3290,14 +3306,15 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const ptr_ty = f.air.typeOf(ty_op.operand); - const ptr_scalar_ty = ptr_ty.scalarType(); + const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const src_ty = ptr_info.pointee_type; - if (!src_ty.hasRuntimeBitsIgnoreComptime()) { + if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ty_op.operand}); return .none; } @@ -3306,9 +3323,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); - const target = f.object.dg.module.getTarget(); - const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target); - const is_array = lowersToArray(src_ty, target); + const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(mod); + const is_array = lowersToArray(src_ty, mod); const need_memcpy = !is_aligned or is_array; const writer = f.object.writer(); @@ -3327,17 +3343,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, src_ty); try writer.writeAll("))"); } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) { - var host_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const host_ty = Type.initPayload(&host_pl.base); + const host_bits: u16 = ptr_info.host_size * 8; + const host_ty = try mod.intType(.unsigned, host_bits); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(host_pl.data - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -3345,11 +3354,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - var field_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, src_ty.bitSize(target)), - }; - const field_ty = Type.initPayload(&field_pl.base); + const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3360,9 +3365,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("(("); try f.renderType(writer, field_ty); try writer.writeByte(')'); - const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64; if (cant_cast) { - if 
(field_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); @@ -3390,23 +3395,23 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); - const target = f.object.dg.module.getTarget(); const op_inst = Air.refToIndex(un_op); const op_ty = f.air.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType() else op_ty; var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); + const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) { try reap(f, inst, &.{un_op}); _ = try airCall(f, op_inst.?, .always_tail); - } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); var deref = is_ptr; - const is_array = lowersToArray(ret_ty, target); + const is_array = lowersToArray(ret_ty, mod); const ret_val = if (is_array) ret_val: { const array_local = try f.allocLocal(inst, lowered_ret_ty); try writer.writeAll("memcpy("); @@ -3442,15 +3447,16 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3467,20 +3473,20 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const target = f.object.dg.module.getTarget(); - const dest_int_info = inst_scalar_ty.intInfo(target); + const inst_scalar_ty = inst_ty.scalarType(mod); + const dest_int_info = inst_scalar_ty.intInfo(mod); const dest_bits = dest_int_info.bits; const dest_c_bits = toCIntBits(dest_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); - const scalar_int_info = scalar_ty.intInfo(target); + const scalar_ty = operand_ty.scalarType(mod); + const scalar_int_info = scalar_ty.intInfo(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3515,7 +3521,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { var stack 
align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - const mask_val = try inst_scalar_ty.maxInt(stack.get(), target); + const mask_val = try inst_scalar_ty.maxInt(stack.get(), mod); try writer.writeAll("zig_and_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); try writer.writeByte('('); @@ -3577,17 +3583,18 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { + const mod = f.object.dg.module; // *a = b; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const ptr_ty = f.air.typeOf(bin_op.lhs); - const ptr_scalar_ty = ptr_ty.scalarType(); + const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.air.typeOf(bin_op.rhs); - const val_is_undef = if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false; + const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3602,10 +3609,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { return .none; } - const target = f.object.dg.module.getTarget(); const is_aligned = ptr_info.@"align" == 0 or - ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target); - const is_array = lowersToArray(ptr_info.pointee_type, target); + ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(mod); + const is_array = lowersToArray(ptr_info.pointee_type, mod); const need_memcpy = !is_aligned or is_array; const src_val = try f.resolveInst(bin_op.rhs); @@ -3647,14 +3653,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) { const host_bits = ptr_info.host_size * 8; - var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits }; - const host_ty = Type.initPayload(&host_pl.base); + const host_ty = try mod.intType(.unsigned, host_bits); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(host_bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -3662,7 +3663,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - const src_bits = src_ty.bitSize(target); + const src_bits = src_ty.bitSize(mod); const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb; var stack align(@alignOf(ExpectedContents)) = @@ -3693,9 +3694,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)}); try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeByte('('); - const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64; if (cant_cast) { - if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (src_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_make_"); try 
f.object.dg.renderTypeForBuiltinFnName(writer, host_ty); try writer.writeAll("(0, "); @@ -3705,7 +3706,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeByte(')'); } - if (src_ty.isPtrAtRuntime()) { + if (src_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); @@ -3728,6 +3729,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3737,7 +3739,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const inst_ty = f.air.typeOfIndex(inst); const operand_ty = f.air.typeOf(bin_op.lhs); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const w = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3765,9 +3767,10 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: } fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits); const op = try f.resolveInst(ty_op.operand); @@ -3797,11 +3800,11 @@ fn airBinOp( operation: []const u8, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const operand_ty = f.air.typeOf(bin_op.lhs); - const scalar_ty = operand_ty.scalarType(); - const target = f.object.dg.module.getTarget(); - if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat()) + const scalar_ty = operand_ty.scalarType(mod); + if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); const lhs = try f.resolveInst(bin_op.lhs); @@ -3835,12 +3838,12 @@ fn airCmpOp( data: anytype, operator: std.math.CompareOperator, ) !CValue { + const mod = f.object.dg.module; const lhs_ty = f.air.typeOf(data.lhs); - const scalar_ty = lhs_ty.scalarType(); + const scalar_ty = lhs_ty.scalarType(mod); - const target = f.object.dg.module.getTarget(); - const scalar_bits = scalar_ty.bitSize(target); - if (scalar_ty.isInt() and scalar_bits > 64) + const scalar_bits = scalar_ty.bitSize(mod); + if (scalar_ty.isInt(mod) and scalar_bits > 64) return airCmpBuiltinCall( f, inst, @@ -3885,12 +3888,12 @@ fn airEquality( inst: Air.Inst.Index, operator: std.math.CompareOperator, ) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const operand_ty = f.air.typeOf(bin_op.lhs); - const target = f.object.dg.module.getTarget(); - const operand_bits = operand_ty.bitSize(target); - if (operand_ty.isInt() and operand_bits > 64) + const operand_bits = operand_ty.bitSize(mod); + if (operand_ty.isInt(mod) and operand_bits > 64) return airCmpBuiltinCall( f, inst, @@ -3912,7 +3915,7 @@ fn airEquality( try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (operand_ty.zigTypeTag() == .Optional and !operand_ty.optionalReprIsPayload()) { + if (operand_ty.zigTypeTag(mod) == 
.Optional and !operand_ty.optionalReprIsPayload(mod)) { // (A && B) || (C && (A == B)) // A = lhs.is_null ; B = rhs.is_null ; C = rhs.payload == lhs.payload @@ -3965,6 +3968,7 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3973,8 +3977,8 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const elem_ty = inst_scalar_ty.elemType2(); + const inst_scalar_ty = inst_ty.scalarType(mod); + const elem_ty = inst_scalar_ty.elemType2(mod); const local = try f.allocLocal(inst, inst_ty); const writer = f.object.writer(); @@ -3983,7 +3987,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { try v.elem(f, writer); try writer.writeAll(" = "); - if (elem_ty.hasRuntimeBitsIgnoreComptime()) { + if (elem_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We must convert to and from integer types to prevent UB if the operation // results in a NULL pointer, or if LHS is NULL. The operation is only UB // if the result is NULL and then dereferenced. @@ -4012,13 +4016,13 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { } fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); - const target = f.object.dg.module.getTarget(); - if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64) + if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64) return try airBinBuiltinCall(f, inst, operation[1..], .none); if (inst_scalar_ty.isRuntimeFloat()) return try airBinFloatOp(f, inst, operation); @@ -4092,12 +4096,11 @@ fn airCall( inst: Air.Inst.Index, modifier: std.builtin.CallModifier, ) !CValue { + const mod = f.object.dg.module; // Not even allowed to call panic in a naked function. 
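// [editor's note] On the optional equality above: only pointer-like optionals
// (now detected with ty.optionalReprIsPayload(mod), which needs the Module to
// inspect the type) compare as a single scalar; other optionals carry a
// separate is_null flag, hence the (A && B) || (C && (A == B)) expansion the
// existing comment describes. Note also that in airCall below the inline
// capture was renamed from |mod| to |m|, freeing the name `mod` for the Module.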
if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; const gpa = f.object.dg.gpa; - const module = f.object.dg.module; - const target = module.getTarget(); const writer = f.object.writer(); const pl_op = f.air.instructions.items(.data)[inst].pl_op; @@ -4116,7 +4119,7 @@ fn airCall( resolved_arg.* = try f.resolveInst(arg); if (arg_cty != try f.typeToIndex(arg_ty, .complete)) { var lowered_arg_buf: LowerFnRetTyBuffer = undefined; - const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target); + const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, mod); const array_local = try f.allocLocal(inst, lowered_arg_ty); try writer.writeAll("memcpy("); @@ -4139,7 +4142,7 @@ fn airCall( } const callee_ty = f.air.typeOf(pl_op.operand); - const fn_ty = switch (callee_ty.zigTypeTag()) { + const fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => callee_ty.childType(), else => unreachable, @@ -4147,13 +4150,13 @@ fn airCall( const ret_ty = fn_ty.fnReturnType(); var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target); + const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); const result_local = result: { if (modifier == .always_tail) { try writer.writeAll("zig_always_tail return "); break :result .none; - } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result .none; } else if (f.liveness.isUnused(inst)) { try writer.writeByte('('); @@ -4171,7 +4174,7 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = f.air.value(pl_op.operand) orelse break :known; + const callee_val = f.air.value(pl_op.operand, mod) orelse break :known; break :fn_decl switch (callee_val.tag()) { .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, .function => callee_val.castTag(.function).?.data.owner_decl, @@ -4181,9 +4184,9 @@ fn airCall( }; switch (modifier) { .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0), - inline .never_tail, .never_inline => |mod| try writer.writeAll(try f.getLazyFnName( - @unionInit(LazyFnKey, @tagName(mod), fn_decl), - @unionInit(LazyFnValue.Data, @tagName(mod), {}), + inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName( + @unionInit(LazyFnKey, @tagName(m), fn_decl), + @unionInit(LazyFnValue.Data, @tagName(m), {}), )), else => unreachable, } @@ -4211,7 +4214,7 @@ fn airCall( try writer.writeAll(");\n"); const result = result: { - if (result_local == .none or !lowersToArray(ret_ty, target)) + if (result_local == .none or !lowersToArray(ret_ty, mod)) break :result result_local; const array_local = try f.allocLocal(inst, ret_ty); @@ -4254,9 +4257,10 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (f.air.value(pl_op.operand)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4330,12 +4334,13 @@ fn lowerTry( err_union_ty: Type, is_ptr: bool, ) !CValue { + const mod = f.object.dg.module; const err_union = try f.resolveInst(operand); const inst_ty = 
f.air.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(); - const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(); + const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { try writer.writeAll("if ("); @@ -4431,6 +4436,8 @@ const LocalResult = struct { need_free: bool, fn move(lr: LocalResult, f: *Function, inst: Air.Inst.Index, dest_ty: Type) !CValue { + const mod = f.object.dg.module; + if (lr.need_free) { // Move the freshly allocated local to be owned by this instruction, // by returning it here instead of freeing it. @@ -4441,7 +4448,7 @@ const LocalResult = struct { try lr.free(f); const writer = f.object.writer(); try f.writeCValue(writer, local, .Other); - if (dest_ty.isAbiInt()) { + if (dest_ty.isAbiInt(mod)) { try writer.writeAll(" = "); } else { try writer.writeAll(" = ("); @@ -4461,12 +4468,13 @@ const LocalResult = struct { }; fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; + const target = mod.getTarget(); const writer = f.object.writer(); - if (operand_ty.isAbiInt() and dest_ty.isAbiInt()) { - const src_info = dest_ty.intInfo(target); - const dest_info = operand_ty.intInfo(target); + if (operand_ty.isAbiInt(mod) and dest_ty.isAbiInt(mod)) { + const src_info = dest_ty.intInfo(mod); + const dest_info = operand_ty.intInfo(mod); if (src_info.signedness == dest_info.signedness and src_info.bits == dest_info.bits) { @@ -4477,7 +4485,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca } } - if (dest_ty.isPtrAtRuntime() and operand_ty.isPtrAtRuntime()) { + if (dest_ty.isPtrAtRuntime(mod) and operand_ty.isPtrAtRuntime(mod)) { const local = try f.allocLocal(0, dest_ty); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = ("); @@ -4494,7 +4502,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca const operand_lval = if (operand == .constant) blk: { const operand_local = try f.allocLocal(0, operand_ty); try f.writeCValue(writer, operand_local, .Other); - if (operand_ty.isAbiInt()) { + if (operand_ty.isAbiInt(mod)) { try writer.writeAll(" = "); } else { try writer.writeAll(" = ("); @@ -4516,13 +4524,10 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca try writer.writeAll("));\n"); // Ensure padding bits have the expected value. 
- if (dest_ty.isAbiInt()) { + if (dest_ty.isAbiInt(mod)) { const dest_cty = try f.typeToCType(dest_ty, .complete); - const dest_info = dest_ty.intInfo(target); - var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) { - .unsigned => .int_unsigned, - .signed => .int_signed, - } }, .data = dest_info.bits }; + const dest_info = dest_ty.intInfo(mod); + var bits: u16 = dest_info.bits; var wrap_cty: ?CType = null; var need_bitcasts = false; @@ -4535,9 +4540,9 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca const elem_cty = f.indexToCType(pl.data.elem_type); wrap_cty = elem_cty.toSignedness(dest_info.signedness); need_bitcasts = wrap_cty.?.tag() == .zig_i128; - info_ty_pl.data -= 1; - info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8); - info_ty_pl.data += 1; + bits -= 1; + bits %= @intCast(u16, f.byteSize(elem_cty) * 8); + bits += 1; } try writer.writeAll(" = "); if (need_bitcasts) { @@ -4546,7 +4551,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca try writer.writeByte('('); } try writer.writeAll("zig_wrap_"); - const info_ty = Type.initPayload(&info_ty_pl.base); + const info_ty = try mod.intType(dest_info.signedness, bits); if (wrap_cty) |cty| try f.object.dg.renderCTypeForBuiltinFnName(writer, cty) else @@ -4675,6 +4680,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const condition = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4683,11 +4689,11 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); try writer.writeAll("switch ("); - if (condition_ty.zigTypeTag() == .Bool) { + if (condition_ty.zigTypeTag(mod) == .Bool) { try writer.writeByte('('); try f.renderType(writer, Type.u1); try writer.writeByte(')'); - } else if (condition_ty.isPtrAtRuntime()) { + } else if (condition_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); @@ -4714,12 +4720,12 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { for (items) |item| { try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); - if (condition_ty.isPtrAtRuntime()) { + if (condition_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?, .Other); + try f.object.dg.renderValue(writer, condition_ty, f.air.value(item, mod).?, .Other); try writer.writeByte(':'); } try writer.writeByte(' '); @@ -4764,6 +4770,7 @@ fn asmInputNeedsLocal(constraint: []const u8, value: CValue) bool { } fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Asm, ty_pl.payload); const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0; @@ -4778,7 +4785,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); const inst_ty = f.air.typeOfIndex(inst); - const local = if (inst_ty.hasRuntimeBitsIgnoreComptime()) local: { + const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: { const local = try f.allocLocal(inst, inst_ty); if (f.wantSafety()) { try f.writeCValue(writer, local, .Other); @@ 
-5025,6 +5032,7 @@ fn airIsNull( operator: []const u8, is_ptr: bool, ) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); @@ -5046,14 +5054,14 @@ fn airIsNull( const payload_ty = optional_ty.optionalChild(&payload_buf); var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime()) + const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) TypedValue{ .ty = Type.bool, .val = Value.true } - else if (optional_ty.isPtrLikeOptional()) + else if (optional_ty.isPtrLikeOptional(mod)) // operand is a regular pointer, test `operand !=/== NULL` TypedValue{ .ty = optional_ty, .val = Value.null } - else if (payload_ty.zigTypeTag() == .ErrorSet) + else if (payload_ty.zigTypeTag(mod) == .ErrorSet) TypedValue{ .ty = payload_ty, .val = Value.zero } - else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload()) rhs: { + else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf); break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; @@ -5070,6 +5078,7 @@ fn airIsNull( } fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -5079,7 +5088,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return .none; } @@ -5087,7 +5096,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); try f.writeCValue(writer, operand, .Other); @@ -5104,6 +5113,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { } fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const writer = f.object.writer(); @@ -5113,14 +5123,14 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { const opt_ty = ptr_ty.childType(); const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) { return .{ .undef = inst_ty }; } const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { // the operand is just a regular pointer, no need to do anything special. 
// *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C try writer.writeAll(" = "); @@ -5134,6 +5144,7 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); @@ -5144,7 +5155,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.air.typeOfIndex(inst); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { if (f.liveness.isUnused(inst)) { return .none; } @@ -5179,36 +5190,36 @@ fn fieldLocation( container_ty: Type, field_ptr_ty: Type, field_index: u32, - target: std.Target, + mod: *const Module, ) union(enum) { begin: void, field: CValue, byte_offset: u32, end: void, } { - return switch (container_ty.zigTypeTag()) { + return switch (container_ty.zigTypeTag(mod)) { .Struct => switch (container_ty.containerLayout()) { .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| { if (container_ty.structFieldIsComptime(next_field_index)) continue; const field_ty = container_ty.structFieldType(next_field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; break .{ .field = if (container_ty.isSimpleTuple()) .{ .field = next_field_index } else .{ .identifier = container_ty.structFieldName(next_field_index) } }; - } else if (container_ty.hasRuntimeBitsIgnoreComptime()) .end else .begin, + } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin, .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0) - .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, target) } + .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } else .begin, }, .Union => switch (container_ty.containerLayout()) { .Auto, .Extern => { const field_ty = container_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return if (container_ty.unionTagTypeSafety() != null and - !container_ty.unionHasAllZeroBitFieldTypes()) + !container_ty.unionHasAllZeroBitFieldTypes(mod)) .{ .field = .{ .identifier = "payload" } } else .begin; @@ -5252,10 +5263,10 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue } fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const target = f.object.dg.module.getTarget(); const container_ptr_ty = f.air.typeOfIndex(inst); const container_ty = container_ptr_ty.childType(); @@ -5270,7 +5281,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, container_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, target)) { + switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { var u8_ptr_pl = field_ptr_ty.ptrInfo(); @@ -5321,7 +5332,7 @@ fn fieldPtr( container_ptr_val: CValue, field_index: u32, ) !CValue { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; const container_ty = 
container_ptr_ty.elemType(); const field_ptr_ty = f.air.typeOfIndex(inst); @@ -5335,7 +5346,7 @@ fn fieldPtr( try f.renderType(writer, field_ptr_ty); try writer.writeByte(')'); - switch (fieldLocation(container_ty, field_ptr_ty, field_index, target)) { + switch (fieldLocation(container_ty, field_ptr_ty, field_index, mod)) { .begin => try f.writeCValue(writer, container_ptr_val, .Initializer), .field => |field| { try writer.writeByte('&'); @@ -5370,16 +5381,16 @@ fn fieldPtr( } fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{extra.struct_operand}); return .none; } - const target = f.object.dg.module.getTarget(); const struct_byval = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); const struct_ty = f.air.typeOf(extra.struct_operand); @@ -5396,32 +5407,21 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { .{ .identifier = struct_ty.structFieldName(extra.field_index) }, .Packed => { const struct_obj = struct_ty.castTag(.@"struct").?.data; - const int_info = struct_ty.intInfo(target); + const int_info = struct_ty.intInfo(mod); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = struct_obj.packedFieldBitOffset(target, extra.field_index), + .data = struct_obj.packedFieldBitOffset(mod, extra.field_index), }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - const field_int_signedness = if (inst_ty.isAbiInt()) - inst_ty.intInfo(target).signedness + const field_int_signedness = if (inst_ty.isAbiInt(mod)) + inst_ty.intInfo(mod).signedness else .unsigned; - var field_int_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (field_int_signedness) { - .unsigned => .int_unsigned, - .signed => .int_signed, - } }, - .data = @intCast(u16, inst_ty.bitSize(target)), - }; - const field_int_ty = Type.initPayload(&field_int_pl.base); + const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); const temp_local = try f.allocLocal(inst, field_int_ty); try f.writeCValue(writer, temp_local, .Other); @@ -5432,7 +5432,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); const cant_cast = int_info.bits > 64; if (cant_cast) { - if (field_int_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); try writer.writeAll("zig_lo_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); @@ -5511,6 +5511,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { /// *(E!T) -> E /// Note that the result is never a pointer. 
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); @@ -5518,13 +5519,13 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.air.typeOf(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer; + const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const local = try f.allocLocal(inst, inst_ty); - if (!payload_ty.hasRuntimeBits() and operand == .local and operand.local == local.new_local) { + if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) { // The store will be 'x = x'; elide it. return local; } @@ -5533,7 +5534,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBits(mod)) { try f.writeCValue(writer, operand, .Other); } else { if (!error_ty.errorSetIsEmpty()) @@ -5549,6 +5550,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); @@ -5558,7 +5560,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; const writer = f.object.writer(); - if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) { + if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) { if (!is_ptr) return .none; const local = try f.allocLocal(inst, inst_ty); @@ -5584,10 +5586,11 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu } fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); - const repr_is_payload = inst_ty.optionalReprIsPayload(); + const repr_is_payload = inst_ty.optionalReprIsPayload(mod); const payload_ty = f.air.typeOf(ty_op.operand); const payload = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5615,11 +5618,12 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_ty = inst_ty.errorUnionSet(); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5653,6 +5657,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try 
f.resolveInst(ty_op.operand); @@ -5662,7 +5667,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const payload_ty = error_union_ty.errorUnionPayload(); // First, set the non-error value. - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { try f.writeCValueDeref(writer, operand); try writer.writeAll(" = "); try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); @@ -5703,12 +5708,13 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); const payload = try f.resolveInst(ty_op.operand); - const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(); + const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_ty = inst_ty.errorUnionSet(); try reap(f, inst, &.{ty_op.operand}); @@ -5735,6 +5741,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { } fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); @@ -5750,7 +5757,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const try writer.writeAll(" = "); if (!error_ty.errorSetIsEmpty()) - if (payload_ty.hasRuntimeBits()) + if (payload_ty.hasRuntimeBits(mod)) if (is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) else @@ -5768,6 +5775,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const } fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -5784,7 +5792,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { if (operand == .undef) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer); - } else if (array_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); @@ -5801,6 +5809,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.air.typeOfIndex(inst); @@ -5810,10 +5819,10 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { const target = f.object.dg.module.getTarget(); const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat()) if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend" - else if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) - if (inst_ty.isSignedInt()) "fix" else "fixuns" - else if (inst_ty.isRuntimeFloat() and operand_ty.isInt()) - if (operand_ty.isSignedInt()) "float" else "floatun" + else if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) + if (inst_ty.isSignedInt(mod)) "fix" else "fixuns" + else if (inst_ty.isRuntimeFloat() and operand_ty.isInt(mod)) + if (operand_ty.isSignedInt(mod)) 
"float" else "floatun" else unreachable; @@ -5822,19 +5831,19 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) { + if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) { try writer.writeAll("zig_wrap_"); try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty); try writer.writeByte('('); } try writer.writeAll("zig_"); try writer.writeAll(operation); - try writer.writeAll(compilerRtAbbrev(operand_ty, target)); - try writer.writeAll(compilerRtAbbrev(inst_ty, target)); + try writer.writeAll(compilerRtAbbrev(operand_ty, mod)); + try writer.writeAll(compilerRtAbbrev(inst_ty, mod)); try writer.writeByte('('); try f.writeCValue(writer, operand, .FunctionArgument); try writer.writeByte(')'); - if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) { + if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) { try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits); try writer.writeByte(')'); } @@ -5871,14 +5880,15 @@ fn airUnBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const operand_ty = f.air.typeOf(ty_op.operand); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); const ref_ret = inst_scalar_cty.tag() == .array; @@ -5914,6 +5924,7 @@ fn airBinBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const operand_ty = f.air.typeOf(bin_op.lhs); @@ -5925,8 +5936,8 @@ fn airBinBuiltinCall( if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); - const scalar_ty = operand_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); + const scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); const ref_ret = inst_scalar_cty.tag() == .array; @@ -5968,14 +5979,15 @@ fn airCmpBuiltinCall( operation: enum { cmp, operator }, info: BuiltinInfo, ) !CValue { + const mod = f.object.dg.module; const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const operand_ty = f.air.typeOf(data.lhs); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); const ref_ret = inst_scalar_cty.tag() == .array; @@ -6017,6 +6029,7 @@ fn airCmpBuiltinCall( } fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); @@ -6030,15 +6043,13 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const new_value_mat = try 
Materialize.start(f, inst, writer, ty, new_value); try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); - const target = f.object.dg.module.getTarget(); - var repr_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.abiSize(target) * 8), - }; - const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; + const repr_ty = if (ty.isRuntimeFloat()) + mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + else + ty; const local = try f.allocLocal(inst, inst_ty); - if (inst_ty.isPtrLikeOptional()) { + if (inst_ty.isPtrLikeOptional(mod)) { { const a = try Assignment.start(f, writer, ty); try f.writeCValue(writer, local, .Other); @@ -6123,6 +6134,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue } fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; const inst_ty = f.air.typeOfIndex(inst); @@ -6135,14 +6147,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const operand_mat = try Materialize.start(f, inst, writer, ty, operand); try reap(f, inst, &.{ pl_op.operand, extra.operand }); - const target = f.object.dg.module.getTarget(); - var repr_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.abiSize(target) * 8), - }; + const repr_bits = @intCast(u16, ty.abiSize(mod) * 8); const is_float = ty.isRuntimeFloat(); - const is_128 = repr_pl.data == 128; - const repr_ty = if (is_float) Type.initPayload(&repr_pl.base) else ty; + const is_128 = repr_bits == 128; + const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty; const local = try f.allocLocal(inst, inst_ty); try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())}); @@ -6181,18 +6189,17 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const atomic_load = f.air.instructions.items(.data)[inst].atomic_load; const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); const ptr_ty = f.air.typeOf(atomic_load.ptr); const ty = ptr_ty.childType(); - const target = f.object.dg.module.getTarget(); - var repr_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.abiSize(target) * 8), - }; - const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; + const repr_ty = if (ty.isRuntimeFloat()) + mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + else + ty; const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); @@ -6218,6 +6225,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const ptr_ty = f.air.typeOf(bin_op.lhs); const ty = ptr_ty.childType(); @@ -6228,12 +6236,10 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa const element_mat = try Materialize.start(f, inst, writer, ty, element); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const target = f.object.dg.module.getTarget(); - var repr_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, 
ty.abiSize(target) * 8), - }; - const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty; + const repr_ty = if (ty.isRuntimeFloat()) + mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable + else + ty; try writer.writeAll("zig_atomic_store((zig_atomic("); try f.renderType(writer, ty); @@ -6262,14 +6268,14 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo } fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const dest_ty = f.air.typeOf(bin_op.lhs); const dest_slice = try f.resolveInst(bin_op.lhs); const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.air.typeOf(bin_op.rhs); - const target = f.object.dg.module.getTarget(); - const elem_abi_size = elem_ty.abiSize(target); - const val_is_undef = if (f.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false; + const elem_abi_size = elem_ty.abiSize(mod); + const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6383,12 +6389,12 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try f.resolveInst(bin_op.lhs); const src_ptr = try f.resolveInst(bin_op.rhs); const dest_ty = f.air.typeOf(bin_op.lhs); const src_ty = f.air.typeOf(bin_op.rhs); - const target = f.object.dg.module.getTarget(); const writer = f.object.writer(); try writer.writeAll("memcpy("); @@ -6399,7 +6405,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { switch (dest_ty.ptrSize()) { .Slice => { const elem_ty = dest_ty.childType(); - const elem_abi_size = elem_ty.abiSize(target); + const elem_abi_size = elem_ty.abiSize(mod); try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }); if (elem_abi_size > 1) { try writer.print(" * {d});\n", .{elem_abi_size}); @@ -6410,7 +6416,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { .One => { const array_ty = dest_ty.childType(); const elem_ty = array_ty.childType(); - const elem_abi_size = elem_ty.abiSize(target); + const elem_abi_size = elem_ty.abiSize(mod); const len = array_ty.arrayLen() * elem_abi_size; try writer.print("{d});\n", .{len}); }, @@ -6422,14 +6428,14 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const union_ptr = try f.resolveInst(bin_op.lhs); const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const target = f.object.dg.module.getTarget(); const union_ty = f.air.typeOf(bin_op.lhs).childType(); - const layout = union_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety().?; @@ -6443,14 +6449,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const union_ty = f.air.typeOf(ty_op.operand); - const target = 
f.object.dg.module.getTarget(); - const layout = union_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; const inst_ty = f.air.typeOfIndex(inst); @@ -6501,13 +6507,14 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6555,6 +6562,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { } fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -6562,8 +6570,6 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const lhs = try f.resolveInst(extra.a); const rhs = try f.resolveInst(extra.b); - const module = f.object.dg.module; - const target = module.getTarget(); const inst_ty = f.air.typeOfIndex(inst); const writer = f.object.writer(); @@ -6581,7 +6587,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("] = "); var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target); + const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); var src_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = @intCast(u64, mask_elem ^ mask_elem >> 63), @@ -6597,16 +6603,17 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { } fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const reduce = f.air.instructions.items(.data)[inst].reduce; - const target = f.object.dg.module.getTarget(); + const target = mod.getTarget(); const scalar_ty = f.air.typeOfIndex(inst); const operand = try f.resolveInst(reduce.operand); try reap(f, inst, &.{reduce.operand}); const operand_ty = f.air.typeOf(reduce.operand); const writer = f.object.writer(); - const use_operator = scalar_ty.bitSize(target) <= 64; + const use_operator = scalar_ty.bitSize(mod) <= 64; const op: union(enum) { const Func = struct { operation: []const u8, info: BuiltinInfo = .none }; float_op: Func, @@ -6617,28 +6624,28 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } }, .Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } }, .Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } }, - .Min => switch (scalar_ty.zigTypeTag()) { + .Min => switch (scalar_ty.zigTypeTag(mod)) { .Int => if (use_operator) .{ .ternary = " < " } else .{ .builtin = .{ .operation = "min" }, }, .Float => .{ .float_op = .{ .operation = "fmin" } }, else => unreachable, }, - .Max => switch (scalar_ty.zigTypeTag()) { + .Max => switch (scalar_ty.zigTypeTag(mod)) { .Int => if (use_operator) .{ .ternary = " > " } else .{ .builtin = .{ .operation = "max" }, }, .Float => .{ .float_op = .{ .operation = "fmax" } }, else => unreachable, }, - .Add => switch (scalar_ty.zigTypeTag()) { + .Add => switch (scalar_ty.zigTypeTag(mod)) { .Int => if 
(use_operator) .{ .infix = " += " } else .{ .builtin = .{ .operation = "addw", .info = .bits }, }, .Float => .{ .builtin = .{ .operation = "add" } }, else => unreachable, }, - .Mul => switch (scalar_ty.zigTypeTag()) { + .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int => if (use_operator) .{ .infix = " *= " } else .{ .builtin = .{ .operation = "mulw", .info = .bits }, }, @@ -6680,22 +6687,22 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { .Or, .Xor, .Add => Value.zero, - .And => switch (scalar_ty.zigTypeTag()) { + .And => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, - else => switch (scalar_ty.intInfo(target).signedness) { - .unsigned => try scalar_ty.maxInt(stack.get(), target), + else => switch (scalar_ty.intInfo(mod).signedness) { + .unsigned => try scalar_ty.maxInt(stack.get(), mod), .signed => Value.negative_one, }, }, - .Min => switch (scalar_ty.zigTypeTag()) { + .Min => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, - .Int => try scalar_ty.maxInt(stack.get(), target), + .Int => try scalar_ty.maxInt(stack.get(), mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, - .Max => switch (scalar_ty.zigTypeTag()) { + .Max => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.zero, - .Int => try scalar_ty.minInt(stack.get(), target), + .Int => try scalar_ty.minInt(stack.get(), mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, @@ -6753,6 +6760,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const inst_ty = f.air.typeOfIndex(inst); const len = @intCast(usize, inst_ty.arrayLen()); @@ -6770,11 +6778,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } } - const target = f.object.dg.module.getTarget(); - const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - switch (inst_ty.zigTypeTag()) { + switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => { const elem_ty = inst_ty.childType(); const a = try Assignment.init(f, elem_ty); @@ -6799,7 +6805,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { .Auto, .Extern => for (resolved_elements, 0..) 
|element, field_i| { if (inst_ty.structFieldIsComptime(field_i)) continue; const field_ty = inst_ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const a = try Assignment.start(f, writer, field_ty); try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple()) @@ -6813,13 +6819,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { .Packed => { try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - const int_info = inst_ty.intInfo(target); + const int_info = inst_ty.intInfo(mod); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); @@ -6828,7 +6830,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (0..elements.len) |field_i| { if (inst_ty.structFieldIsComptime(field_i)) continue; const field_ty = inst_ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) { try writer.writeAll("zig_or_"); @@ -6841,7 +6843,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { for (resolved_elements, 0..) |element, field_i| { if (inst_ty.structFieldIsComptime(field_i)) continue; const field_ty = inst_ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(", "); // TODO: Skip this entire shift if val is 0? 
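The hunk above replaces a stack-allocated Type.Payload.Bits with a call to
mod.intType, the same pattern this commit applies throughout the C backend
(see also airStructFieldVal, airCmpxchg, airAtomicRmw, airAtomicLoad, and
airAtomicStore above). A minimal sketch of the two forms, assuming a
mod: *Module is in scope; the 16-bit width is arbitrary, chosen only for
illustration:

    // Before: an integer type built from a payload on the stack. The
    // resulting Type borrows bits_pl, so it is only valid while this frame
    // lives, and identical types constructed elsewhere are distinct objects.
    var bits_pl = Type.Payload.Bits{
        .base = .{ .tag = .int_unsigned },
        .data = 16,
    };
    const old_ty = Type.initPayload(&bits_pl.base);

    // After: the type is interned through the Module's InternPool. The call
    // is fallible (hence `try`, or `catch unreachable` at call sites that
    // cannot propagate errors), and the returned type no longer borrows
    // stack memory, so it is deduplicated and can be stored or returned
    // freely.
    const new_ty = try mod.intType(.unsigned, 16);

This is also why so many hunks in this diff thread a Module pointer into
previously target-only helpers: constructing or inspecting an integer type
now goes through the InternPool, which only the Module can reach.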
@@ -6849,13 +6851,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty); try writer.writeByte('('); - if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) { + if (inst_ty.isAbiInt(mod) and (field_ty.isAbiInt(mod) or field_ty.isPtrAtRuntime(mod))) { try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument); } else { try writer.writeByte('('); try f.renderType(writer, inst_ty); try writer.writeByte(')'); - if (field_ty.isPtrAtRuntime()) { + if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try f.renderType(writer, switch (int_info.signedness) { .unsigned => Type.usize, @@ -6872,7 +6874,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeByte(')'); if (!empty) try writer.writeByte(')'); - bit_offset_val_pl.data += field_ty.bitSize(target); + bit_offset_val_pl.data += field_ty.bitSize(mod); empty = false; } @@ -6886,11 +6888,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = f.air.typeOfIndex(inst); - const target = f.object.dg.module.getTarget(); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_name = union_obj.fields.keys()[extra.field_index]; const payload_ty = f.air.typeOf(extra.init); @@ -6908,7 +6910,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { } const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: { - const layout = union_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name).?; @@ -6991,13 +6993,14 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue { } fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const operand_ty = f.air.typeOf(un_op); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, operand_ty); @@ -7016,13 +7019,14 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -7043,6 +7047,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal } fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const lhs = try f.resolveInst(bin_op.lhs); @@ -7050,7 +7055,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = 
inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -7074,6 +7079,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa } fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data; @@ -7083,7 +7089,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand }); const inst_ty = f.air.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(); + const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -7279,8 +7285,9 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 { }; } -fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 { - return if (ty.isInt()) switch (ty.intInfo(target).bits) { +fn compilerRtAbbrev(ty: Type, mod: *Module) []const u8 { + const target = mod.getTarget(); + return if (ty.isInt(mod)) switch (ty.intInfo(mod).bits) { 1...32 => "si", 33...64 => "di", 65...128 => "ti", @@ -7407,7 +7414,7 @@ fn undefPattern(comptime IntType: type) IntType { const FormatIntLiteralContext = struct { dg: *DeclGen, - int_info: std.builtin.Type.Int, + int_info: InternPool.Key.IntType, kind: CType.Kind, cty: CType, val: Value, @@ -7418,7 +7425,8 @@ fn formatIntLiteral( options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - const target = data.dg.module.getTarget(); + const mod = data.dg.module; + const target = mod.getTarget(); const ExpectedContents = struct { const base = 10; @@ -7449,7 +7457,7 @@ fn formatIntLiteral( }; undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits); break :blk undef_int.toConst(); - } else data.val.toBigInt(&int_buf, target); + } else data.val.toBigInt(&int_buf, mod); assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8); @@ -7684,7 +7692,8 @@ const Vectorize = struct { index: CValue = .none, pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { - return if (ty.zigTypeTag() == .Vector) index: { + const mod = f.object.dg.module; + return if (ty.zigTypeTag(mod) == .Vector) index: { var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() }; const local = try f.allocLocal(inst, Type.usize); @@ -7727,10 +7736,10 @@ const LowerFnRetTyBuffer = struct { values: [1]Value, payload: Type.Payload.AnonStruct, }; -fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) Type { - if (ret_ty.zigTypeTag() == .NoReturn) return Type.initTag(.noreturn); +fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type { + if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.initTag(.noreturn); - if (lowersToArray(ret_ty, target)) { + if (lowersToArray(ret_ty, mod)) { buffer.names = [1][]const u8{"array"}; buffer.types = [1]Type{ret_ty}; buffer.values = [1]Value{Value.initTag(.unreachable_value)}; @@ -7742,13 +7751,13 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) T return Type.initPayload(&buffer.payload.base); } - return if (ret_ty.hasRuntimeBitsIgnoreComptime()) ret_ty else Type.void; + return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else 
Type.void; } -fn lowersToArray(ty: Type, target: std.Target) bool { - return switch (ty.zigTypeTag()) { +fn lowersToArray(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => return true, - else => return ty.isAbiInt() and toCIntBits(@intCast(u32, ty.bitSize(target))) == null, + else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null, }; } diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 6116d070e6c6..5064b84b1dfe 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -292,19 +292,19 @@ pub const CType = extern union { .abi = std.math.log2_int(u32, abi_alignment), }; } - pub fn abiAlign(ty: Type, target: Target) AlignAs { - const abi_align = ty.abiAlignment(target); + pub fn abiAlign(ty: Type, mod: *const Module) AlignAs { + const abi_align = ty.abiAlignment(mod); return init(abi_align, abi_align); } - pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs { + pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *const Module) AlignAs { return init( - struct_ty.structFieldAlign(field_i, target), - struct_ty.structFieldType(field_i).abiAlignment(target), + struct_ty.structFieldAlign(field_i, mod), + struct_ty.structFieldType(field_i).abiAlignment(mod), ); } - pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs { + pub fn unionPayloadAlign(union_ty: Type, mod: *const Module) AlignAs { const union_obj = union_ty.cast(Type.Payload.Union).?.data; - const union_payload_align = union_obj.abiAlignment(target, false); + const union_payload_align = union_obj.abiAlignment(mod, false); return init(union_payload_align, union_payload_align); } @@ -344,8 +344,8 @@ pub const CType = extern union { return self.map.entries.items(.hash)[index - Tag.no_payload_count]; } - pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index { - const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } }; + pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index { + const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } }; var convert: Convert = undefined; convert.initType(ty, kind, lookup) catch unreachable; @@ -405,7 +405,7 @@ pub const CType = extern union { ); if (!gop.found_existing) { errdefer _ = self.set.map.pop(); - gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert); + gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert); } if (std.debug.runtime_safety) { const adapter = TypeAdapter64{ @@ -1236,10 +1236,10 @@ pub const CType = extern union { } pub const Lookup = union(enum) { - fail: Target, + fail: *Module, imm: struct { set: *const Store.Set, - target: Target, + mod: *Module, }, mut: struct { promoted: *Store.Promoted, @@ -1254,10 +1254,14 @@ pub const CType = extern union { } pub fn getTarget(self: @This()) Target { + return self.getModule().getTarget(); + } + + pub fn getModule(self: @This()) *Module { return switch (self) { - .fail => |target| target, - .imm => |imm| imm.target, - .mut => |mut| mut.mod.getTarget(), + .fail => |mod| mod, + .imm => |imm| imm.mod, + .mut => |mut| mut.mod, }; } @@ -1272,7 +1276,7 @@ pub const CType = extern union { pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index { return switch (self) { .fail => null, - .imm => |imm| imm.set.typeToIndex(ty, imm.target, kind), + .imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind), .mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind), }; } @@ -1284,7 +1288,7 @@ 
pub const CType = extern union { pub fn freeze(self: @This()) @This() { return switch (self) { .fail, .imm => self, - .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } }, + .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } }, }; } }; @@ -1338,7 +1342,7 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "array", .type = array_idx, - .alignas = AlignAs.abiAlign(ty, lookup.getTarget()), + .alignas = AlignAs.abiAlign(ty, lookup.getModule()), }; self.initAnon(kind, fwd_idx, 1); } else self.init(switch (kind) { @@ -1350,12 +1354,12 @@ pub const CType = extern union { } pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { - const target = lookup.getTarget(); + const mod = lookup.getModule(); self.* = undefined; - if (!ty.isFnOrHasRuntimeBitsIgnoreComptime()) + if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) self.init(.void) - else if (ty.isAbiInt()) switch (ty.tag()) { + else if (ty.isAbiInt(mod)) switch (ty.tag()) { .usize => self.init(.uintptr_t), .isize => self.init(.intptr_t), .c_char => self.init(.char), @@ -1367,13 +1371,13 @@ pub const CType = extern union { .c_ulong => self.init(.@"unsigned long"), .c_longlong => self.init(.@"long long"), .c_ulonglong => self.init(.@"unsigned long long"), - else => switch (tagFromIntInfo(ty.intInfo(target))) { + else => switch (tagFromIntInfo(ty.intInfo(mod))) { .void => unreachable, else => |t| self.init(t), .array => switch (kind) { .forward, .complete, .global => { - const abi_size = ty.abiSize(target); - const abi_align = ty.abiAlignment(target); + const abi_size = ty.abiSize(mod); + const abi_align = ty.abiAlignment(mod); self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{ .len = @divExact(abi_size, abi_align), .elem_type = tagFromIntInfo(.{ @@ -1389,7 +1393,7 @@ pub const CType = extern union { .payload => unreachable, }, }, - } else switch (ty.zigTypeTag()) { + } else switch (ty.zigTypeTag(mod)) { .Frame => unreachable, .AnyFrame => unreachable, @@ -1434,12 +1438,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "ptr", .type = ptr_idx, - .alignas = AlignAs.abiAlign(ptr_ty, target), + .alignas = AlignAs.abiAlign(ptr_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "len", .type = Tag.uintptr_t.toIndex(), - .alignas = AlignAs.abiAlign(Type.usize, target), + .alignas = AlignAs.abiAlign(Type.usize, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1462,12 +1466,8 @@ pub const CType = extern union { }, }; - var host_int_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = info.host_size * 8, - }; const pointee_ty = if (info.host_size > 0 and info.vector_index == .none) - Type.initPayload(&host_int_pl.base) + try mod.intType(.unsigned, info.host_size * 8) else info.pointee_type; @@ -1490,11 +1490,9 @@ pub const CType = extern union { if (ty.castTag(.@"struct")) |struct_obj| { try self.initType(struct_obj.data.backing_int_ty, kind, lookup); } else { - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.bitSize(target)), - }; - try self.initType(Type.initPayload(&buf.base), kind, lookup); + const bits = @intCast(u16, ty.bitSize(mod)); + const int_ty = try mod.intType(.unsigned, bits); + try self.initType(int_ty, kind, lookup); } } else if (ty.isTupleOrAnonStruct()) { if (lookup.isMutable()) { @@ -1505,7 +1503,7 @@ pub const CType = extern union { }) |field_i| { const field_ty = ty.structFieldType(field_i); if 
((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(field_ty, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter => .complete, @@ -1555,7 +1553,7 @@ pub const CType = extern union { self.storage.anon.fields[field_count] = .{ .name = "payload", .type = payload_idx.?, - .alignas = AlignAs.unionPayloadAlign(ty, target), + .alignas = AlignAs.unionPayloadAlign(ty, mod), }; field_count += 1; } @@ -1563,7 +1561,7 @@ pub const CType = extern union { self.storage.anon.fields[field_count] = .{ .name = "tag", .type = tag_idx.?, - .alignas = AlignAs.abiAlign(tag_ty.?, target), + .alignas = AlignAs.abiAlign(tag_ty.?, mod), }; field_count += 1; } @@ -1576,7 +1574,7 @@ pub const CType = extern union { } }; self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; } else self.init(.@"struct"); - } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) { + } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) { self.init(.void); } else { var is_packed = false; @@ -1586,9 +1584,9 @@ pub const CType = extern union { else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_align = AlignAs.fieldAlign(ty, field_i, target); + const field_align = AlignAs.fieldAlign(ty, field_i, mod); if (field_align.@"align" < field_align.abi) { is_packed = true; if (!lookup.isMutable()) break; @@ -1643,8 +1641,8 @@ pub const CType = extern union { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - if (ty.optionalReprIsPayload()) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (ty.optionalReprIsPayload(mod)) { try self.initType(payload_ty, kind, lookup); } else if (switch (kind) { .forward, .forward_parameter => @as(Index, undefined), @@ -1661,12 +1659,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "payload", .type = payload_idx, - .alignas = AlignAs.abiAlign(payload_ty, target), + .alignas = AlignAs.abiAlign(payload_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "is_null", .type = Tag.bool.toIndex(), - .alignas = AlignAs.abiAlign(Type.bool, target), + .alignas = AlignAs.abiAlign(Type.bool, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1699,12 +1697,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "payload", .type = payload_idx, - .alignas = AlignAs.abiAlign(payload_ty, target), + .alignas = AlignAs.abiAlign(payload_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "error", .type = error_idx, - .alignas = AlignAs.abiAlign(error_ty, target), + .alignas = AlignAs.abiAlign(error_ty, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1733,7 +1731,7 @@ pub const CType = extern union { }; _ = try lookup.typeToIndex(info.return_type, param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(param_type, param_kind); } } @@ -1900,16 +1898,16 @@ pub const CType = extern union { } } - fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType { + fn 
createFromType(store: *Store.Promoted, ty: Type, mod: *const Module, kind: Kind) !CType { var convert: Convert = undefined; - try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } }); - return createFromConvert(store, ty, target, kind, &convert); + try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } }); + return createFromConvert(store, ty, mod, kind, &convert); } fn createFromConvert( store: *Store.Promoted, ty: Type, - target: Target, + mod: *Module, kind: Kind, convert: Convert, ) !CType { @@ -1930,7 +1928,7 @@ pub const CType = extern union { .packed_struct, .packed_union, => { - const zig_ty_tag = ty.zigTypeTag(); + const zig_ty_tag = ty.zigTypeTag(mod); const fields_len = switch (zig_ty_tag) { .Struct => ty.structFieldCount(), .Union => ty.unionFields().count(), @@ -1941,7 +1939,7 @@ pub const CType = extern union { for (0..fields_len) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; c_fields_len += 1; } @@ -1950,7 +1948,7 @@ pub const CType = extern union { for (0..fields_len) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; fields_pl[c_field_i] = .{ @@ -1962,12 +1960,12 @@ pub const CType = extern union { .Union => ty.unionFields().keys()[field_i], else => unreachable, }), - .type = store.set.typeToIndex(field_ty, target, switch (kind) { + .type = store.set.typeToIndex(field_ty, mod, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter, .payload => .complete, .global => .global, }).?, - .alignas = AlignAs.fieldAlign(ty, field_i, target), + .alignas = AlignAs.fieldAlign(ty, field_i, mod), }; } @@ -2004,7 +2002,7 @@ pub const CType = extern union { const struct_pl = try arena.create(Payload.Aggregate); struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + .fwd_decl = store.set.typeToIndex(ty, mod, .forward).?, } }; return initPayload(struct_pl); }, @@ -2026,21 +2024,21 @@ pub const CType = extern union { var c_params_len: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?; c_param_i += 1; } const fn_pl = try arena.create(Payload.Function); fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(info.return_type, target, param_kind).?, + .return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?, .param_types = params_pl, } }; return initPayload(fn_pl); @@ -2067,12 +2065,12 @@ pub const CType = extern union { } pub fn eql(self: @This(), ty: Type, cty: CType) bool { + const mod = self.lookup.getModule(); switch (self.convert.value) { .cty => |c| return 
c.eql(cty), .tag => |t| { if (t != cty.tag()) return false; - const target = self.lookup.getTarget(); switch (t) { .fwd_anon_struct, .fwd_anon_union, @@ -2084,7 +2082,7 @@ pub const CType = extern union { ]u8 = undefined; const c_fields = cty.cast(Payload.Fields).?.data; - const zig_ty_tag = ty.zigTypeTag(); + const zig_ty_tag = ty.zigTypeTag(mod); var c_field_i: usize = 0; for (0..switch (zig_ty_tag) { .Struct => ty.structFieldCount(), @@ -2093,7 +2091,7 @@ pub const CType = extern union { }) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; const c_field = &c_fields[c_field_i]; @@ -2113,7 +2111,7 @@ pub const CType = extern union { else => unreachable, }, mem.span(c_field.name), - ) or AlignAs.fieldAlign(ty, field_i, target).@"align" != + ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" != c_field.alignas.@"align") return false; } return true; @@ -2146,7 +2144,7 @@ pub const CType = extern union { .function, .varargs_function, => { - if (ty.zigTypeTag() != .Fn) return false; + if (ty.zigTypeTag(mod) != .Fn) return false; const info = ty.fnInfo(); assert(!info.is_generic); @@ -2162,7 +2160,7 @@ pub const CType = extern union { var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; if (c_param_i >= data.param_types.len) return false; const param_cty = data.param_types[c_param_i]; @@ -2202,7 +2200,7 @@ pub const CType = extern union { .tag => |t| { autoHash(hasher, t); - const target = self.lookup.getTarget(); + const mod = self.lookup.getModule(); switch (t) { .fwd_anon_struct, .fwd_anon_union, @@ -2211,15 +2209,15 @@ pub const CType = extern union { std.fmt.count("f{}", .{std.math.maxInt(usize)}) ]u8 = undefined; - const zig_ty_tag = ty.zigTypeTag(); - for (0..switch (ty.zigTypeTag()) { + const zig_ty_tag = ty.zigTypeTag(mod); + for (0..switch (ty.zigTypeTag(mod)) { .Struct => ty.structFieldCount(), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { .forward, .forward_parameter => .forward, @@ -2234,7 +2232,7 @@ pub const CType = extern union { .Union => ty.unionFields().keys()[field_i], else => unreachable, }); - autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align"); + autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align"); } }, @@ -2271,7 +2269,7 @@ pub const CType = extern union { self.updateHasherRecurse(hasher, info.return_type, param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, param_type, param_kind); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index beb230945579..c3d3da0d322e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -598,7 +598,7 @@ pub const Object = struct { }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const 
slice_alignment = slice_ty.abiAlignment(target); + const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len); @@ -880,28 +880,28 @@ pub const Object = struct { pub fn updateFunc( o: *Object, - module: *Module, + mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness, ) !void { const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); - const target = module.getTarget(); + const decl = mod.declPtr(decl_index); + const target = mod.getTarget(); var dg: DeclGen = .{ .context = o.context, .object = o, - .module = module, + .module = mod, .decl_index = decl_index, .decl = decl, .err_msg = null, - .gpa = module.gpa, + .gpa = mod.gpa, }; const llvm_func = try dg.resolveLlvmFunction(decl_index); - if (module.align_stack_fns.get(func)) |align_info| { + if (mod.align_stack_fns.get(func)) |align_info| { dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment); dg.addFnAttr(llvm_func, "noinline"); } else { @@ -922,7 +922,7 @@ pub const Object = struct { } // TODO: disable this if safety is off for the function scope - const ssp_buf_size = module.comp.bin_file.options.stack_protector; + const ssp_buf_size = mod.comp.bin_file.options.stack_protector; if (ssp_buf_size != 0) { var buf: [12]u8 = undefined; const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable; @@ -931,7 +931,7 @@ pub const Object = struct { } // TODO: disable this if safety is off for the function scope - if (module.comp.bin_file.options.stack_check) { + if (mod.comp.bin_file.options.stack_check) { dg.addFnAttrString(llvm_func, "probe-stack", "__zig_probe_stack"); } else if (target.os.tag == .uefi) { dg.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); @@ -954,17 +954,17 @@ pub const Object = struct { // This gets the LLVM values from the function and stores them in `dg.args`. 
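// A sketch, not part of the upstream patch: the hunks above and below all
// apply one mechanical pattern from this commit — layout queries that used
// to take a bare `std.Target` now take a `*Module`, through which both the
// InternPool and the Target remain reachable. Hypothetical miniature of the
// signature migration (`SketchModule`/`SketchType` are invented names;
// assumes `const std = @import("std");` as in the enclosing file):
const SketchModule = struct {
    target: std.Target,
    pub fn getTarget(mod: *const SketchModule) std.Target {
        return mod.target;
    }
};
const SketchType = struct {
    alignment: u32,
    // Before this patch: fn abiAlignment(ty: SketchType, target: std.Target) u32
    pub fn abiAlignment(ty: SketchType, mod: *const SketchModule) u32 {
        _ = mod.getTarget(); // the Target is still reachable when needed
        return ty.alignment;
    }
};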
const fn_info = decl.ty.fnInfo(); - const sret = firstParamSRet(fn_info, target); + const sret = firstParamSRet(fn_info, mod); const ret_ptr = if (sret) llvm_func.getParam(0) else null; const gpa = dg.gpa; - if (ccAbiPromoteInt(fn_info.cc, target, fn_info.return_type)) |s| switch (s) { + if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) { .signed => dg.addAttr(llvm_func, 0, "signext"), .unsigned => dg.addAttr(llvm_func, 0, "zeroext"), }; - const err_return_tracing = fn_info.return_type.isError() and - module.comp.bin_file.options.error_return_tracing; + const err_return_tracing = fn_info.return_type.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; const err_ret_trace = if (err_return_tracing) llvm_func.getParam(@boolToInt(ret_ptr != null)) @@ -989,8 +989,8 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); try args.ensureUnusedCapacity(1); - if (isByRef(param_ty)) { - const alignment = param_ty.abiAlignment(target); + if (isByRef(param_ty, mod)) { + const alignment = param_ty.abiAlignment(mod); const param_llvm_ty = param.typeOf(); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); const store_inst = builder.buildStore(param, arg_ptr); @@ -1007,14 +1007,14 @@ pub const Object = struct { const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); dg.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty); llvm_arg_i += 1; try args.ensureUnusedCapacity(1); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { args.appendAssumeCapacity(param); } else { const load_inst = builder.buildLoad(param_llvm_ty, param, ""); @@ -1026,14 +1026,14 @@ pub const Object = struct { const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); dg.addArgAttr(llvm_func, llvm_arg_i, "noundef"); llvm_arg_i += 1; try args.ensureUnusedCapacity(1); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { args.appendAssumeCapacity(param); } else { const load_inst = builder.buildLoad(param_llvm_ty, param, ""); @@ -1048,10 +1048,10 @@ pub const Object = struct { llvm_arg_i += 1; const param_llvm_ty = try dg.lowerType(param_ty); - const abi_size = @intCast(c_uint, param_ty.abiSize(target)); + const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); const int_llvm_ty = dg.context.intType(abi_size * 8); const alignment = @max( - param_ty.abiAlignment(target), + param_ty.abiAlignment(mod), dg.object.target_data.abiAlignmentOfType(int_llvm_ty), ); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); @@ -1060,7 +1060,7 @@ pub const Object = struct { try args.ensureUnusedCapacity(1); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { args.appendAssumeCapacity(arg_ptr); } else { const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); @@ -1078,7 +1078,7 @@ pub const Object = struct { dg.addArgAttr(llvm_func, llvm_arg_i, "noalias"); } } - if (param_ty.zigTypeTag() != .Optional) { + if (param_ty.zigTypeTag(mod) != .Optional) { dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull"); } if (!ptr_info.mutable) { @@ -1087,7 +1087,7 @@ pub const 
Object = struct { if (ptr_info.@"align" != 0) { dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", ptr_info.@"align"); } else { - const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1); + const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1); dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align); } const ptr_param = llvm_func.getParam(llvm_arg_i); @@ -1105,7 +1105,7 @@ pub const Object = struct { const field_types = it.llvm_types_buffer[0..it.llvm_types_len]; const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); - const param_alignment = param_ty.abiAlignment(target); + const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False); for (field_types, 0..) |_, field_i_usize| { @@ -1117,7 +1117,7 @@ pub const Object = struct { store_inst.setAlignment(target.ptrBitWidth() / 8); } - const is_by_ref = isByRef(param_ty); + const is_by_ref = isByRef(param_ty, mod); const loaded = if (is_by_ref) arg_ptr else l: { const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); load_inst.setAlignment(param_alignment); @@ -1139,11 +1139,11 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); _ = builder.buildStore(param, arg_ptr); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { try args.append(arg_ptr); } else { const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); @@ -1157,11 +1157,11 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target); _ = builder.buildStore(param, arg_ptr); - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { try args.append(arg_ptr); } else { const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, ""); @@ -1180,7 +1180,7 @@ pub const Object = struct { const line_number = decl.src_line + 1; const is_internal_linkage = decl.val.tag() != .extern_fn and - !module.decl_exports.contains(decl_index); + !mod.decl_exports.contains(decl_index); const noret_bit: c_uint = if (fn_info.return_type.isNoReturn()) llvm.DIFlags.NoReturn else @@ -1196,7 +1196,7 @@ pub const Object = struct { true, // is definition line_number + func.lbrace_line, // scope line llvm.DIFlags.StaticMember | noret_bit, - module.comp.bin_file.options.optimize_mode != .Debug, + mod.comp.bin_file.options.optimize_mode != .Debug, null, // decl_subprogram ); try dg.object.di_map.put(gpa, decl, subprogram.toNode()); @@ -1219,7 +1219,7 @@ pub const Object = struct { .func_inst_table = .{}, .llvm_func = llvm_func, .blocks = .{}, - .single_threaded = module.comp.bin_file.options.single_threaded, + .single_threaded = mod.comp.bin_file.options.single_threaded, .di_scope = di_scope, .di_file = di_file, .base_line = dg.decl.src_line, @@ -1232,14 +1232,14 @@ pub const Object = struct { fg.genBody(air.getMainBody()) catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; - try 
module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?); + try mod.failed_decls.put(mod.gpa, decl_index, dg.err_msg.?); dg.err_msg = null; return; }, else => |e| return e, }; - try o.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void { @@ -1275,37 +1275,40 @@ pub const Object = struct { pub fn updateDeclExports( self: *Object, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { + const gpa = mod.gpa; // If the module does not already have the function, we ignore this function call // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. const llvm_global = self.decl_map.get(decl_index) orelse return; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.isExtern()) { - const is_wasm_fn = module.getTarget().isWasm() and try decl.isFunction(); + const is_wasm_fn = mod.getTarget().isWasm() and try decl.isFunction(mod); const mangle_name = is_wasm_fn and decl.getExternFn().?.lib_name != null and !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c"); const decl_name = if (mangle_name) name: { - const tmp = try std.fmt.allocPrintZ(module.gpa, "{s}|{s}", .{ decl.name, decl.getExternFn().?.lib_name.? }); + const tmp = try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ + decl.name, decl.getExternFn().?.lib_name.?, + }); break :name tmp.ptr; } else decl.name; - defer if (mangle_name) module.gpa.free(std.mem.sliceTo(decl_name, 0)); + defer if (mangle_name) gpa.free(std.mem.sliceTo(decl_name, 0)); llvm_global.setValueName(decl_name); if (self.getLlvmGlobal(decl_name)) |other_global| { if (other_global != llvm_global) { log.debug("updateDeclExports isExtern()=true setValueName({s}) conflict", .{decl.name}); - try self.extern_collisions.put(module.gpa, decl_index, {}); + try self.extern_collisions.put(gpa, decl_index, {}); } } llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); - if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default); + if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); if (self.di_map.get(decl)) |di_node| { - if (try decl.isFunction()) { + if (try decl.isFunction(mod)) { const di_func = @ptrCast(*llvm.DISubprogram, di_node); const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name)); di_func.replaceLinkageName(linkage_name); @@ -1329,9 +1332,9 @@ pub const Object = struct { const exp_name = exports[0].options.name; llvm_global.setValueName2(exp_name.ptr, exp_name.len); llvm_global.setUnnamedAddr(.False); - if (module.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); + if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); if (self.di_map.get(decl)) |di_node| { - if (try decl.isFunction()) { + if (try decl.isFunction(mod)) { const di_func = @ptrCast(*llvm.DISubprogram, di_node); const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len); di_func.replaceLinkageName(linkage_name); @@ -1353,8 +1356,8 @@ pub const Object = struct { .protected => llvm_global.setVisibility(.Protected), } if (exports[0].options.section) |section| { - const section_z = try module.gpa.dupeZ(u8, section); - defer module.gpa.free(section_z); + const section_z = try gpa.dupeZ(u8, section); + defer gpa.free(section_z); llvm_global.setSection(section_z); } if 
(decl.val.castTag(.variable)) |variable| { @@ -1370,8 +1373,8 @@ pub const Object = struct { // Until then we iterate over existing aliases and make them point // to the correct decl, or otherwise add a new alias. Old aliases are leaked. for (exports[1..]) |exp| { - const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name); - defer module.gpa.free(exp_name_z); + const exp_name_z = try gpa.dupeZ(u8, exp.options.name); + defer gpa.free(exp_name_z); if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| { alias.setAliasee(llvm_global); @@ -1385,14 +1388,14 @@ pub const Object = struct { } } } else { - const fqn = try decl.getFullyQualifiedName(module); - defer module.gpa.free(fqn); + const fqn = try decl.getFullyQualifiedName(mod); + defer gpa.free(fqn); llvm_global.setValueName2(fqn.ptr, fqn.len); llvm_global.setLinkage(.Internal); - if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default); + if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); llvm_global.setUnnamedAddr(.True); if (decl.val.castTag(.variable)) |variable| { - const single_threaded = module.comp.bin_file.options.single_threaded; + const single_threaded = mod.comp.bin_file.options.single_threaded; if (variable.data.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { @@ -1479,14 +1482,15 @@ pub const Object = struct { const gpa = o.gpa; const target = o.target; const dib = o.di_builder.?; - switch (ty.zigTypeTag()) { + const mod = o.module; + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => { const di_type = dib.createBasicType("void", 0, DW.ATE.signed); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); return di_type; }, .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); assert(info.bits != 0); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); @@ -1494,7 +1498,7 @@ pub const Object = struct { .signed => DW.ATE.signed, .unsigned => DW.ATE.unsigned, }; - const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types + const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types const di_type = dib.createBasicType(name, di_bits, dwarf_encoding); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); return di_type; @@ -1503,7 +1507,7 @@ pub const Object = struct { const owner_decl_index = ty.getOwnerDecl(); const owner_decl = o.module.declPtr(owner_decl_index); - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. @@ -1522,9 +1526,8 @@ pub const Object = struct { }; const field_index_val = Value.initPayload(&buf_field_index.base); - var buffer: Type.Payload.Bits = undefined; - const int_ty = ty.intTagType(&buffer); - const int_info = ty.intInfo(target); + const int_ty = ty.intTagType(); + const int_info = ty.intInfo(mod); assert(int_info.bits != 0); for (field_names, 0..) 
|field_name, i| { @@ -1536,7 +1539,7 @@ pub const Object = struct { const field_int_val = field_index_val.enumToInt(ty, &buf_u64); var bigint_space: Value.BigIntSpace = undefined; - const bigint = field_int_val.toBigInt(&bigint_space, target); + const bigint = field_int_val.toBigInt(&bigint_space, mod); if (bigint.limbs.len == 1) { enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned); @@ -1566,8 +1569,8 @@ pub const Object = struct { name, di_file, owner_decl.src_node + 1, - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, + ty.abiSize(mod) * 8, + ty.abiAlignment(mod) * 8, enumerators.ptr, @intCast(c_int, enumerators.len), try o.lowerDebugType(int_ty, .full), @@ -1604,7 +1607,7 @@ pub const Object = struct { !ptr_info.mutable or ptr_info.@"volatile" or ptr_info.size == .Many or ptr_info.size == .C or - !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) + !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) { var payload: Type.Payload.Pointer = .{ .data = .{ @@ -1623,7 +1626,7 @@ pub const Object = struct { }, }, }; - if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) { + if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) { payload.data.pointee_type = Type.anyopaque; } const bland_ptr_ty = Type.initPayload(&payload.base); @@ -1657,10 +1660,10 @@ pub const Object = struct { break :blk fwd_decl; }; - const ptr_size = ptr_ty.abiSize(target); - const ptr_align = ptr_ty.abiAlignment(target); - const len_size = len_ty.abiSize(target); - const len_align = len_ty.abiAlignment(target); + const ptr_size = ptr_ty.abiSize(mod); + const ptr_align = ptr_ty.abiAlignment(mod); + const len_size = len_ty.abiSize(mod); + const len_align = len_ty.abiAlignment(mod); var offset: u64 = 0; offset += ptr_size; @@ -1697,8 +1700,8 @@ pub const Object = struct { name.ptr, di_file, line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &fields, @@ -1719,7 +1722,7 @@ pub const Object = struct { const ptr_di_ty = dib.createPointerType( elem_di_ty, target.ptrBitWidth(), - ty.ptrAlignment(target) * 8, + ty.ptrAlignment(mod) * 8, name, ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. @@ -1750,8 +1753,8 @@ pub const Object = struct { }, .Array => { const array_di_ty = dib.createArrayType( - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, + ty.abiSize(mod) * 8, + ty.abiAlignment(mod) * 8, try o.lowerDebugType(ty.childType(), .full), @intCast(c_int, ty.arrayLen()), ); @@ -1760,14 +1763,14 @@ pub const Object = struct { return array_di_ty; }, .Vector => { - const elem_ty = ty.elemType2(); + const elem_ty = ty.elemType2(mod); // Vector elements cannot be padded since that would make // @bitSizOf(elem) * len > @bitSizOf(vec). // Neither gdb nor lldb seem to be able to display non-byte sized // vectors properly. 
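// A sketch, not part of the upstream patch: several hunks in this region
// drop the old caller-provided payload buffers (e.g. `var buf:
// Type.Payload.Bits`) in favor of `mod.intType(...)` / `ty.intTagType()`.
// The reason is that an intern pool owns and deduplicates the type memory,
// so callers get a stable handle back instead of building a stack payload.
// Hypothetical miniature (`IntKey`/`SketchPool` are invented names; assumes
// `std` is imported as in this file):
const IntKey = struct { signedness: std.builtin.Signedness, bits: u16 };
const SketchPool = struct {
    map: std.AutoHashMapUnmanaged(IntKey, u32) = .{},
    next_index: u32 = 0,
    // Returns the same handle for the same (signedness, bits) pair.
    pub fn intType(pool: *SketchPool, gpa: std.mem.Allocator, key: IntKey) !u32 {
        const gop = try pool.map.getOrPut(gpa, key);
        if (!gop.found_existing) {
            gop.value_ptr.* = pool.next_index;
            pool.next_index += 1;
        }
        return gop.value_ptr.*;
    }
};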
- const elem_di_type = switch (elem_ty.zigTypeTag()) { + const elem_di_type = switch (elem_ty.zigTypeTag(mod)) { .Int => blk: { - const info = elem_ty.intInfo(target); + const info = elem_ty.intInfo(mod); assert(info.bits != 0); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); @@ -1782,8 +1785,8 @@ pub const Object = struct { }; const vector_di_ty = dib.createVectorType( - ty.abiSize(target) * 8, - ty.abiAlignment(target) * 8, + ty.abiSize(mod) * 8, + ty.abiAlignment(mod) * 8, elem_di_type, ty.vectorLen(), ); @@ -1796,13 +1799,13 @@ pub const Object = struct { defer gpa.free(name); var buf: Type.Payload.ElemType = undefined; const child_ty = ty.optionalChild(&buf); - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { + if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { const di_bits = 8; // lldb cannot handle non-byte sized types const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { const ptr_di_ty = try o.lowerDebugType(child_ty, resolve); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module }); @@ -1826,10 +1829,10 @@ pub const Object = struct { }; const non_null_ty = Type.u8; - const payload_size = child_ty.abiSize(target); - const payload_align = child_ty.abiAlignment(target); - const non_null_size = non_null_ty.abiSize(target); - const non_null_align = non_null_ty.abiAlignment(target); + const payload_size = child_ty.abiSize(mod); + const payload_align = child_ty.abiAlignment(mod); + const non_null_size = non_null_ty.abiSize(mod); + const non_null_align = non_null_ty.abiAlignment(mod); var offset: u64 = 0; offset += payload_size; @@ -1866,8 +1869,8 @@ pub const Object = struct { name.ptr, di_file, line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &fields, @@ -1883,7 +1886,7 @@ pub const Object = struct { }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
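// A sketch, not part of the upstream patch: the recurring "can't use `gop`
// anymore" comments here are the standard hashmap-invalidation rule — a
// recursive call that inserts into the same map may resize it, so pointers
// obtained from an earlier getOrPut must not be used afterwards. General
// illustration with a plain std hashmap (not the compiler's di_type_map):
fn gopInvalidationDemo(gpa: std.mem.Allocator) !void {
    var map: std.AutoHashMapUnmanaged(u32, u32) = .{};
    defer map.deinit(gpa);
    const gop = try map.getOrPut(gpa, 1);
    gop.value_ptr.* = 10; // fine: no intervening inserts yet
    try map.put(gpa, 2, 20); // may grow the table, invalidating `gop`
    try map.put(gpa, 1, 11); // so update by key instead of through `gop`
}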
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module }); @@ -1907,10 +1910,10 @@ pub const Object = struct { break :blk fwd_decl; }; - const error_size = Type.anyerror.abiSize(target); - const error_align = Type.anyerror.abiAlignment(target); - const payload_size = payload_ty.abiSize(target); - const payload_align = payload_ty.abiAlignment(target); + const error_size = Type.anyerror.abiSize(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const payload_size = payload_ty.abiSize(mod); + const payload_align = payload_ty.abiAlignment(mod); var error_index: u32 = undefined; var payload_index: u32 = undefined; @@ -1957,8 +1960,8 @@ pub const Object = struct { name.ptr, di_file, line, - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &fields, @@ -1988,12 +1991,12 @@ pub const Object = struct { const struct_obj = payload.data; if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { assert(struct_obj.haveLayout()); - const info = struct_obj.backing_int_ty.intInfo(target); + const info = struct_obj.backing_int_ty.intInfo(mod); const dwarf_encoding: c_uint = switch (info.signedness) { .signed => DW.ATE.signed, .unsigned => DW.ATE.unsigned, }; - const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types + const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types const di_ty = dib.createBasicType(name, di_bits, dwarf_encoding); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; @@ -2026,10 +2029,10 @@ pub const Object = struct { for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - const field_size = field_ty.abiSize(target); - const field_align = field_ty.abiAlignment(target); + const field_size = field_ty.abiSize(mod); + const field_align = field_ty.abiAlignment(mod); const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = field_offset + field_size; @@ -2057,8 +2060,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from di_fields.items.ptr, @@ -2093,7 +2096,7 @@ pub const Object = struct { } } - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { const owner_decl_index = ty.getOwnerDecl(); const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); @@ -2114,11 +2117,11 @@ pub const Object = struct { comptime assert(struct_layout_version == 2); var offset: u64 = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(); + var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_size = field.ty.abiSize(target); - const field_align = field.alignment(target, layout); + const field_size = field.ty.abiSize(mod); + const field_align = field.alignment(mod, layout); const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = field_offset + field_size; @@ 
-2143,8 +2146,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from di_fields.items.ptr, @@ -2179,7 +2182,7 @@ pub const Object = struct { }; const union_obj = ty.cast(Type.Payload.Union).?.data; - if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime()) { + if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime(mod)) { const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, union_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` @@ -2188,7 +2191,7 @@ pub const Object = struct { return union_di_ty; } - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { const tag_di_ty = try o.lowerDebugType(union_obj.tag_ty, .full); @@ -2198,8 +2201,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &di_fields, @@ -2225,10 +2228,10 @@ pub const Object = struct { const field_name = kv.key_ptr.*; const field = kv.value_ptr.*; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_size = field.ty.abiSize(target); - const field_align = field.normalAlignment(target); + const field_size = field.ty.abiSize(mod); + const field_align = field.normalAlignment(mod); const field_name_copy = try gpa.dupeZ(u8, field_name); defer gpa.free(field_name_copy); @@ -2258,8 +2261,8 @@ pub const Object = struct { union_name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags di_fields.items.ptr, @intCast(c_int, di_fields.items.len), @@ -2319,8 +2322,8 @@ pub const Object = struct { name.ptr, null, // file 0, // line - ty.abiSize(target) * 8, // size in bits - ty.abiAlignment(target) * 8, // align in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags null, // derived from &full_di_fields, @@ -2341,8 +2344,8 @@ pub const Object = struct { defer param_di_types.deinit(); // Return type goes first. 
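// A sketch, not part of the upstream patch: just below, when the ABI passes
// the return value through a hidden sret pointer, the debug signature
// reports void as the return type and the pointer becomes the first
// parameter. Hedged miniature of that selection (the real firstParamSRet
// rule is per-target ABI logic; this helper name is invented):
fn diReturnIsVoidSketch(sret: bool, ret_has_runtime_bits: bool) bool {
    // void is reported either when there is nothing to return at runtime
    // or when the return value travels through the hidden pointer.
    return !ret_has_runtime_bits or sret;
}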
- if (fn_info.return_type.hasRuntimeBitsIgnoreComptime()) { - const sret = firstParamSRet(fn_info, target); + if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { + const sret = firstParamSRet(fn_info, mod); const di_ret_ty = if (sret) Type.void else fn_info.return_type; try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); @@ -2358,7 +2361,7 @@ pub const Object = struct { try param_di_types.append(try o.lowerDebugType(Type.void, .full)); } - if (fn_info.return_type.isError() and + if (fn_info.return_type.isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { var ptr_ty_payload: Type.Payload.ElemType = .{ @@ -2370,9 +2373,9 @@ pub const Object = struct { } for (fn_info.param_types) |param_ty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - if (isByRef(param_ty)) { + if (isByRef(param_ty, mod)) { var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, .data = param_ty, @@ -2450,7 +2453,7 @@ pub const Object = struct { const stack_trace_str: []const u8 = "StackTrace"; // buffer is only used for int_type, `builtin` is a struct. - const builtin_ty = mod.declPtr(builtin_decl).val.toType(undefined); + const builtin_ty = mod.declPtr(builtin_decl).val.toType(); const builtin_namespace = builtin_ty.getNamespace().?; const stack_trace_decl_index = builtin_namespace.decls .getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .mod = mod }).?; @@ -2458,7 +2461,7 @@ pub const Object = struct { // Sema should have ensured that StackTrace was analyzed. assert(stack_trace_decl.has_tv); - return stack_trace_decl.val.toType(undefined); + return stack_trace_decl.val.toType(); } }; @@ -2495,9 +2498,10 @@ pub const DeclGen = struct { if (decl.val.castTag(.extern_fn)) |extern_fn| { _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); } else { - const target = dg.module.getTarget(); + const mod = dg.module; + const target = mod.getTarget(); var global = try dg.resolveGlobalDecl(decl_index); - global.setAlignment(decl.getAlignment(target)); + global.setAlignment(decl.getAlignment(mod)); if (decl.@"linksection") |section| global.setSection(section); assert(decl.has_tv); const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { @@ -2569,19 +2573,20 @@ pub const DeclGen = struct { /// Note that this can be called before the function's semantic analysis has /// completed, so if any attributes rely on that, they must be done in updateFunc, not here. 
fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*llvm.Value { - const decl = dg.module.declPtr(decl_index); + const mod = dg.module; + const decl = mod.declPtr(decl_index); const zig_fn_type = decl.ty; const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index); if (gop.found_existing) return gop.value_ptr.*; assert(decl.has_tv); const fn_info = zig_fn_type.fnInfo(); - const target = dg.module.getTarget(); - const sret = firstParamSRet(fn_info, target); + const target = mod.getTarget(); + const sret = firstParamSRet(fn_info, mod); const fn_type = try dg.lowerType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(dg.module); + const fqn = try decl.getFullyQualifiedName(mod); defer dg.gpa.free(fqn); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); @@ -2593,7 +2598,7 @@ pub const DeclGen = struct { llvm_fn.setLinkage(.Internal); llvm_fn.setUnnamedAddr(.True); } else { - if (dg.module.getTarget().isWasm()) { + if (target.isWasm()) { dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); if (decl.getExternFn().?.lib_name) |lib_name| { const module_name = std.mem.sliceTo(lib_name, 0); @@ -2612,8 +2617,8 @@ pub const DeclGen = struct { llvm_fn.addSretAttr(raw_llvm_ret_ty); } - const err_return_tracing = fn_info.return_type.isError() and - dg.module.comp.bin_file.options.error_return_tracing; + const err_return_tracing = fn_info.return_type.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { dg.addArgAttr(llvm_fn, @boolToInt(sret), "nonnull"); @@ -2656,14 +2661,14 @@ pub const DeclGen = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types[param_index]; - if (!isByRef(param_ty)) { + if (!isByRef(param_ty, mod)) { dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { @@ -2784,12 +2789,13 @@ pub const DeclGen = struct { fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type { const llvm_ty = try lowerTypeInner(dg, t); + const mod = dg.module; if (std.debug.runtime_safety and false) check: { - if (t.zigTypeTag() == .Opaque) break :check; - if (!t.hasRuntimeBits()) break :check; + if (t.zigTypeTag(mod) == .Opaque) break :check; + if (!t.hasRuntimeBits(mod)) break :check; if (!llvm_ty.isSized().toBool()) break :check; - const zig_size = t.abiSize(dg.module.getTarget()); + const zig_size = t.abiSize(mod); const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty); if (llvm_size != zig_size) { log.err("when lowering {}, Zig ABI size = {d} but LLVM ABI size = {d}", .{ @@ -2802,18 +2808,18 @@ pub const DeclGen = struct { fn lowerTypeInner(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type { const gpa = dg.gpa; - const target = dg.module.getTarget(); - switch (t.zigTypeTag()) { + const mod = dg.module; + const target = mod.getTarget(); + switch (t.zigTypeTag(mod)) { .Void, .NoReturn => return dg.context.voidType(), .Int => { - const info = t.intInfo(target); + const info = t.intInfo(mod); assert(info.bits != 0); return dg.context.intType(info.bits); }, .Enum => { - var buffer: Type.Payload.Bits = undefined; - const int_ty = t.intTagType(&buffer); - const bit_count = 
int_ty.intInfo(target).bits; + const int_ty = t.intTagType(); + const bit_count = int_ty.intInfo(mod).bits; assert(bit_count != 0); return dg.context.intType(bit_count); }, @@ -2863,7 +2869,7 @@ pub const DeclGen = struct { }, .Array => { const elem_ty = t.childType(); - assert(elem_ty.onePossibleValue() == null); + assert(elem_ty.onePossibleValue(mod) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); @@ -2875,11 +2881,11 @@ pub const DeclGen = struct { .Optional => { var buf: Type.Payload.ElemType = undefined; const child_ty = t.optionalChild(&buf); - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { + if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.context.intType(8); } const payload_llvm_ty = try dg.lowerType(child_ty); - if (t.optionalReprIsPayload()) { + if (t.optionalReprIsPayload(mod)) { return payload_llvm_ty; } @@ -2887,8 +2893,8 @@ pub const DeclGen = struct { var fields_buf: [3]*llvm.Type = .{ payload_llvm_ty, dg.context.intType(8), undefined, }; - const offset = child_ty.abiSize(target) + 1; - const abi_size = t.abiSize(target); + const offset = child_ty.abiSize(mod) + 1; + const abi_size = t.abiSize(mod); const padding = @intCast(c_uint, abi_size - offset); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2898,17 +2904,17 @@ pub const DeclGen = struct { }, .ErrorUnion => { const payload_ty = t.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try dg.lowerType(Type.anyerror); } const llvm_error_type = try dg.lowerType(Type.anyerror); const llvm_payload_type = try dg.lowerType(payload_ty); - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); - const payload_size = payload_ty.abiSize(target); - const error_size = Type.anyerror.abiSize(target); + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiSize(mod); var fields_buf: [3]*llvm.Type = undefined; if (error_align > payload_align) { @@ -2964,9 +2970,9 @@ pub const DeclGen = struct { for (tuple.types, 0..) 
|field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -2979,7 +2985,7 @@ pub const DeclGen = struct { const field_llvm_ty = try dg.lowerType(field_ty); try llvm_field_types.append(gpa, field_llvm_ty); - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } { const prev_offset = offset; @@ -3027,11 +3033,11 @@ pub const DeclGen = struct { var big_align: u32 = 1; var any_underaligned_fields = false; - var it = struct_obj.runtimeFieldIterator(); + var it = struct_obj.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, struct_obj.layout); - const field_ty_align = field.ty.abiAlignment(target); + const field_align = field.alignment(mod, struct_obj.layout); + const field_ty_align = field.ty.abiAlignment(mod); any_underaligned_fields = any_underaligned_fields or field_align < field_ty_align; big_align = @max(big_align, field_align); @@ -3046,7 +3052,7 @@ pub const DeclGen = struct { const field_llvm_ty = try dg.lowerType(field.ty); try llvm_field_types.append(gpa, field_llvm_ty); - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } { const prev_offset = offset; @@ -3074,11 +3080,11 @@ pub const DeclGen = struct { // reference, we need to copy it here. gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const layout = t.unionGetLayout(target); + const layout = t.unionGetLayout(mod); const union_obj = t.cast(Type.Payload.Union).?.data; if (union_obj.layout == .Packed) { - const bitsize = @intCast(c_uint, t.bitSize(target)); + const bitsize = @intCast(c_uint, t.bitSize(mod)); const int_llvm_ty = dg.context.intType(bitsize); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; @@ -3155,19 +3161,19 @@ pub const DeclGen = struct { } fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type { - const target = dg.module.getTarget(); + const mod = dg.module; const fn_info = fn_ty.fnInfo(); const llvm_ret_ty = try lowerFnRetTy(dg, fn_info); var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa); defer llvm_params.deinit(); - if (firstParamSRet(fn_info, target)) { + if (firstParamSRet(fn_info, mod)) { try llvm_params.append(dg.context.pointerType(0)); } - if (fn_info.return_type.isError() and - dg.module.comp.bin_file.options.error_return_tracing) + if (fn_info.return_type.isError(mod) and + mod.comp.bin_file.options.error_return_tracing) { var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -3189,14 +3195,14 @@ pub const DeclGen = struct { }, .abi_sized_int => { const param_ty = fn_info.param_types[it.zig_index - 1]; - const abi_size = @intCast(c_uint, param_ty.abiSize(target)); + const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); try llvm_params.append(dg.context.intType(abi_size * 8)); }, .slice => { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; var opt_buf: Type.Payload.ElemType = undefined; - const ptr_ty = if (param_ty.zigTypeTag() == .Optional) + const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) 
param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf) else param_ty.slicePtrFieldType(&buf); @@ -3215,7 +3221,7 @@ pub const DeclGen = struct { }, .float_array => |count| { const param_ty = fn_info.param_types[it.zig_index - 1]; - const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty).?); + const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); const field_count = @intCast(c_uint, count); const arr_ty = float_ty.arrayType(field_count); try llvm_params.append(arr_ty); @@ -3239,11 +3245,12 @@ pub const DeclGen = struct { /// being a zero bit type, but it should still be lowered as an i8 in such case. /// There are other similar cases handled here as well. fn lowerPtrElemTy(dg: *DeclGen, elem_ty: Type) Allocator.Error!*llvm.Type { - const lower_elem_ty = switch (elem_ty.zigTypeTag()) { + const mod = dg.module; + const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, .Fn => !elem_ty.fnInfo().is_generic, - .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(), - else => elem_ty.hasRuntimeBitsIgnoreComptime(), + .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(mod), + else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; const llvm_elem_ty = if (lower_elem_ty) try dg.lowerType(elem_ty) @@ -3262,9 +3269,9 @@ pub const DeclGen = struct { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } - const target = dg.module.getTarget(); - - switch (tv.ty.zigTypeTag()) { + const mod = dg.module; + const target = mod.getTarget(); + switch (tv.ty.zigTypeTag(mod)) { .Bool => { const llvm_type = try dg.lowerType(tv.ty); return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); @@ -3276,8 +3283,8 @@ pub const DeclGen = struct { .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), else => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, target); - const int_info = tv.ty.intInfo(target); + const bigint = tv.val.toBigInt(&bigint_space, mod); + const int_info = tv.ty.intInfo(mod); assert(int_info.bits != 0); const llvm_type = dg.context.intType(int_info.bits); @@ -3304,9 +3311,9 @@ pub const DeclGen = struct { const int_val = tv.enumToInt(&int_buffer); var bigint_space: Value.BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_space, target); + const bigint = int_val.toBigInt(&bigint_space, mod); - const int_info = tv.ty.intInfo(target); + const int_info = tv.ty.intInfo(mod); const llvm_type = dg.context.intType(int_info.bits); const unsigned_val = v: { @@ -3408,7 +3415,7 @@ pub const DeclGen = struct { }, .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False); + const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); }, .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { @@ -3439,7 +3446,7 @@ pub const DeclGen = struct { const str_lit = tv.val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; if (tv.ty.sentinel()) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(target)); + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); if (byte == 0 and bytes.len > 0) { return dg.context.constString( bytes.ptr, @@ -3549,13 +3556,13 @@ pub const DeclGen = struct { const payload_ty = tv.ty.optionalChild(&buf); 
const llvm_i8 = dg.context.intType(8); - const is_pl = !tv.val.isNull(); + const is_pl = !tv.val.isNull(mod); const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return non_null_bit; } const llvm_ty = try dg.lowerType(tv.ty); - if (tv.ty.optionalReprIsPayload()) { + if (tv.ty.optionalReprIsPayload(mod)) { if (tv.val.castTag(.opt_payload)) |payload| { return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }); } else if (is_pl) { @@ -3564,7 +3571,7 @@ pub const DeclGen = struct { return llvm_ty.constNull(); } } - assert(payload_ty.zigTypeTag() != .Fn); + assert(payload_ty.zigTypeTag(mod) != .Fn); const llvm_field_count = llvm_ty.countStructElementTypes(); var fields_buf: [3]*llvm.Value = undefined; @@ -3607,14 +3614,14 @@ pub const DeclGen = struct { const payload_type = tv.ty.errorUnionPayload(); const is_pl = tv.val.errorUnionIsPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. const err_val = if (!is_pl) tv.val else Value.initTag(.zero); return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } - const payload_align = payload_type.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); + const payload_align = payload_type.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); const llvm_error_value = try dg.lowerValue(.{ .ty = Type.anyerror, .val = if (is_pl) Value.initTag(.zero) else tv.val, @@ -3661,9 +3668,9 @@ pub const DeclGen = struct { for (tuple.types, 0..) |field_ty, i| { if (tuple.values[i].tag() != .unreachable_value) continue; - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -3685,7 +3692,7 @@ pub const DeclGen = struct { llvm_fields.appendAssumeCapacity(field_llvm_val); - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } { const prev_offset = offset; @@ -3715,7 +3722,7 @@ pub const DeclGen = struct { if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(target); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); @@ -3723,15 +3730,15 @@ pub const DeclGen = struct { var running_bits: u16 = 0; for (field_vals, 0..) 
|field_val, i| { const field = fields[i]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try dg.lowerValue(.{ .ty = field.ty, .val = field_val, }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) else non_int_val.constBitCast(small_int_ty); @@ -3756,10 +3763,10 @@ pub const DeclGen = struct { var big_align: u32 = 0; var need_unnamed = false; - var it = struct_obj.runtimeFieldIterator(); + var it = struct_obj.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, struct_obj.layout); + const field_align = field.alignment(mod, struct_obj.layout); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -3781,7 +3788,7 @@ pub const DeclGen = struct { llvm_fields.appendAssumeCapacity(field_llvm_val); - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } { const prev_offset = offset; @@ -3810,7 +3817,7 @@ pub const DeclGen = struct { const llvm_union_ty = try dg.lowerType(tv.ty); const tag_and_val = tv.val.castTag(.@"union").?.data; - const layout = tv.ty.unionGetLayout(target); + const layout = tv.ty.unionGetLayout(mod); if (layout.payload_size == 0) { return lowerValue(dg, .{ @@ -3824,12 +3831,12 @@ pub const DeclGen = struct { const field_ty = union_obj.fields.values()[field_index].ty; if (union_obj.layout == .Packed) { - if (!field_ty.hasRuntimeBits()) + if (!field_ty.hasRuntimeBits(mod)) return llvm_union_ty.constNull(); const non_int_val = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); - const ty_bit_size = @intCast(u16, field_ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field_ty.bitSize(mod)); const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field_ty.isPtrAtRuntime()) + const small_int_val = if (field_ty.isPtrAtRuntime(mod)) non_int_val.constPtrToInt(small_int_ty) else non_int_val.constBitCast(small_int_ty); @@ -3842,13 +3849,13 @@ pub const DeclGen = struct { // must pointer cast to the expected type before accessing the union. 
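// A sketch, not part of the upstream patch: the union constant lowered
// below pads the active field's value out to `layout.payload_size`, so
// every variant occupies the same number of bytes. Minimal arithmetic
// illustration (invented helper name):
fn unionPaddingBytesSketch(field_size: u64, payload_size: u64) u64 {
    std.debug.assert(field_size <= payload_size);
    return payload_size - field_size;
}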
@@ -3810,7 +3817,7 @@ pub const DeclGen = struct {
             const llvm_union_ty = try dg.lowerType(tv.ty);
             const tag_and_val = tv.val.castTag(.@"union").?.data;

-            const layout = tv.ty.unionGetLayout(target);
+            const layout = tv.ty.unionGetLayout(mod);

             if (layout.payload_size == 0) {
                 return lowerValue(dg, .{
@@ -3824,12 +3831,12 @@ pub const DeclGen = struct {
             const field_ty = union_obj.fields.values()[field_index].ty;
             if (union_obj.layout == .Packed) {
-                if (!field_ty.hasRuntimeBits())
+                if (!field_ty.hasRuntimeBits(mod))
                     return llvm_union_ty.constNull();
                 const non_int_val = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
-                const ty_bit_size = @intCast(u16, field_ty.bitSize(target));
+                const ty_bit_size = @intCast(u16, field_ty.bitSize(mod));
                 const small_int_ty = dg.context.intType(ty_bit_size);
-                const small_int_val = if (field_ty.isPtrAtRuntime())
+                const small_int_val = if (field_ty.isPtrAtRuntime(mod))
                     non_int_val.constPtrToInt(small_int_ty)
                 else
                     non_int_val.constBitCast(small_int_ty);
@@ -3842,13 +3849,13 @@ pub const DeclGen = struct {
             // must pointer cast to the expected type before accessing the union.
             var need_unnamed: bool = layout.most_aligned_field != field_index;
             const payload = p: {
-                if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const padding_len = @intCast(c_uint, layout.payload_size);
                     break :p dg.context.intType(8).arrayType(padding_len).getUndef();
                 }
                 const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
                 need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field);
-                const field_size = field_ty.abiSize(target);
+                const field_size = field_ty.abiSize(mod);
                 if (field_size == layout.payload_size) {
                     break :p field;
                 }
@@ -4012,7 +4019,8 @@ pub const DeclGen = struct {
     }

     fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
-        const target = dg.module.getTarget();
+        const mod = dg.module;
+        const target = mod.getTarget();
         switch (ptr_val.tag()) {
             .decl_ref_mut => {
                 const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
@@ -4045,13 +4053,13 @@ pub const DeclGen = struct {
                 const field_index = @intCast(u32, field_ptr.field_index);
                 const llvm_u32 = dg.context.intType(32);
-                switch (parent_ty.zigTypeTag()) {
+                switch (parent_ty.zigTypeTag(mod)) {
                     .Union => {
                         if (parent_ty.containerLayout() == .Packed) {
                             return parent_llvm_ptr;
                         }

-                        const layout = parent_ty.unionGetLayout(target);
+                        const layout = parent_ty.unionGetLayout(mod);
                         if (layout.payload_size == 0) {
                             // In this case a pointer to the union and a pointer to any
                             // (void) payload is the same.
@@ -4077,8 +4085,8 @@ pub const DeclGen = struct {
                         const prev_bits = b: {
                             var b: usize = 0;
                             for (parent_ty.structFields().values()[0..field_index]) |field| {
-                                if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
-                                b += @intCast(usize, field.ty.bitSize(target));
+                                if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                                b += @intCast(usize, field.ty.bitSize(mod));
                             }
                             break :b b;
                         };
@@ -4091,14 +4099,14 @@ pub const DeclGen = struct {
                         var ty_buf: Type.Payload.Pointer = undefined;
                         const parent_llvm_ty = try dg.lowerType(parent_ty);
-                        if (llvmFieldIndex(parent_ty, field_index, target, &ty_buf)) |llvm_field_index| {
+                        if (llvmFieldIndex(parent_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
                             const indices: [2]*llvm.Value = .{
                                 llvm_u32.constInt(0, .False),
                                 llvm_u32.constInt(llvm_field_index, .False),
                             };
                             return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
                         } else {
-                            const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime()), .False);
+                            const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
                             const indices: [1]*llvm.Value = .{llvm_index};
                             return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
                         }
@@ -4132,8 +4140,8 @@ pub const DeclGen = struct {
                 var buf: Type.Payload.ElemType = undefined;
                 const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
-                    payload_ty.optionalReprIsPayload())
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+                    payload_ty.optionalReprIsPayload(mod))
                 {
                     // In this case, we represent pointer to optional the same as pointer
                     // to the payload.
@@ -4153,13 +4161,13 @@
                 const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true);

                 const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload();
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     // In this case, we represent pointer to error union the same as pointer
                     // to the payload.
                     return parent_llvm_ptr;
                 }

-                const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1;
+                const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1;
                 const llvm_u32 = dg.context.intType(32);
                 const indices: [2]*llvm.Value = .{
                     llvm_u32.constInt(0, .False),
@@ -4177,12 +4185,13 @@
         tv: TypedValue,
         decl_index: Module.Decl.Index,
     ) Error!*llvm.Value {
+        const mod = self.module;
         if (tv.ty.isSlice()) {
             var buf: Type.SlicePtrFieldTypeBuffer = undefined;
             const ptr_ty = tv.ty.slicePtrFieldType(&buf);
             var slice_len: Value.Payload.U64 = .{
                 .base = .{ .tag = .int_u64 },
-                .data = tv.val.sliceLen(self.module),
+                .data = tv.val.sliceLen(mod),
             };
             const fields: [2]*llvm.Value = .{
                 try self.lowerValue(.{
@@ -4202,7 +4211,7 @@
         // const bar = foo;
         // ... &bar;
         // `bar` is just an alias and we actually want to lower a reference to `foo`.
-        const decl = self.module.declPtr(decl_index);
+        const decl = mod.declPtr(decl_index);
         if (decl.val.castTag(.function)) |func| {
             if (func.data.owner_decl != decl_index) {
                 return self.lowerDeclRefValue(tv, func.data.owner_decl);
@@ -4213,21 +4222,21 @@
             }
         }

-        const is_fn_body = decl.ty.zigTypeTag() == .Fn;
-        if ((!is_fn_body and !decl.ty.hasRuntimeBits()) or
+        const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
+        if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
             (is_fn_body and decl.ty.fnInfo().is_generic))
         {
             return self.lowerPtrToVoid(tv.ty);
         }

-        self.module.markDeclAlive(decl);
+        mod.markDeclAlive(decl);

         const llvm_decl_val = if (is_fn_body)
             try self.resolveLlvmFunction(decl_index)
         else
             try self.resolveGlobalDecl(decl_index);

-        const target = self.module.getTarget();
+        const target = mod.getTarget();
         const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
         const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
         const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: {
@@ -4236,7 +4245,7 @@
         } else llvm_decl_val;

         const llvm_type = try self.lowerType(tv.ty);
-        if (tv.ty.zigTypeTag() == .Int) {
+        if (tv.ty.zigTypeTag(mod) == .Int) {
             return llvm_val.constPtrToInt(llvm_type);
         } else {
             return llvm_val.constBitCast(llvm_type);
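The hunk below also drops the scratch-buffer variant of intTagType. Previously the caller had to reserve a Type.Payload.Bits on the stack for the computed tag type to point into; with interned types the result is owned by the InternPool. A sketch of that calling-convention change, assuming an enum type enum_ty in scope:

    // Before: the result borrowed caller-provided scratch storage.
    var buffer: Type.Payload.Bits = undefined;
    const tag_ty_old = enum_ty.intTagType(&buffer);

    // After: the tag type is interned, so no scratch storage is needed
    // and the returned Type does not depend on a live stack frame.
    const tag_ty_new = enum_ty.intTagType();
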
@@ -4338,21 +4347,20 @@
     /// RMW exchange of floating-point values is bitcasted to same-sized integer
     /// types to work around a LLVM deficiency when targeting ARM/AArch64.
     fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*llvm.Type {
-        const target = dg.module.getTarget();
-        var buffer: Type.Payload.Bits = undefined;
-        const int_ty = switch (ty.zigTypeTag()) {
+        const mod = dg.module;
+        const int_ty = switch (ty.zigTypeTag(mod)) {
            .Int => ty,
-            .Enum => ty.intTagType(&buffer),
+            .Enum => ty.intTagType(),
            .Float => {
                 if (!is_rmw_xchg) return null;
-                return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8));
+                return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
            },
            .Bool => return dg.context.intType(8),
            else => return null,
         };
-        const bit_count = int_ty.intInfo(target).bits;
+        const bit_count = int_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
-            return dg.context.intType(@intCast(c_uint, int_ty.abiSize(target) * 8));
+            return dg.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8));
         } else {
             return null;
         }
@@ -4366,15 +4374,15 @@
         fn_info: Type.Payload.Function.Data,
         llvm_arg_i: u32,
     ) void {
-        const target = dg.module.getTarget();
-        if (param_ty.isPtrAtRuntime()) {
+        const mod = dg.module;
+        if (param_ty.isPtrAtRuntime(mod)) {
             const ptr_info = param_ty.ptrInfo().data;

             if (math.cast(u5, param_index)) |i| {
                 if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
                     dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
                 }
             }
-            if (!param_ty.isPtrLikeOptional() and !ptr_info.@"allowzero") {
+            if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.@"allowzero") {
                 dg.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
             }
             if (!ptr_info.mutable) {
@@ -4383,13 +4391,10 @@
             if (ptr_info.@"align" != 0) {
                 dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", ptr_info.@"align");
             } else {
-                const elem_align = @max(
-                    ptr_info.pointee_type.abiAlignment(target),
-                    1,
-                );
+                const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
                 dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align);
             }
-        } else if (ccAbiPromoteInt(fn_info.cc, target, param_ty)) |s| switch (s) {
+        } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
            .signed => dg.addArgAttr(llvm_fn, llvm_arg_i, "signext"),
            .unsigned => dg.addArgAttr(llvm_fn, llvm_arg_i, "zeroext"),
         };
@@ -4490,9 +4495,10 @@
         const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst);
         if (gop.found_existing) return gop.value_ptr.*;

+        const mod = self.dg.module;
         const llvm_val = try self.resolveValue(.{
             .ty = self.air.typeOf(inst),
-            .val = self.air.value(inst).?,
+            .val = self.air.value(inst, mod).?,
         });
         gop.value_ptr.* = llvm_val;
         return llvm_val;
     }
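The resolveInst hunk above shows another recurring change: reading a comptime-known value out of AIR now requires the Module, since constants may be interned rather than stored inline in the Air data. A sketch, assuming the FuncGen context (self) used throughout this file:

    // Before: constants could be read straight out of the Air data:
    //     const val = self.air.value(inst).?;

    // After: interned constants live in the InternPool, reached via the
    // Module, so value resolution takes it as a parameter.
    const mod = self.dg.module;
    const val = self.air.value(inst, mod).?;
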
@@ -4500,11 +4506,12 @@
     fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value {
         const llvm_val = try self.dg.lowerValue(tv);
-        if (!isByRef(tv.ty)) return llvm_val;
+        const mod = self.dg.module;
+        if (!isByRef(tv.ty, mod)) return llvm_val;

         // We have an LLVM value but we need to create a global constant and
         // set the value as its initializer, and then return a pointer to the global.
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
         const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target);
         const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target);
         const global = self.dg.object.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace);
@@ -4512,7 +4519,7 @@
         global.setLinkage(.Private);
         global.setGlobalConstant(.True);
         global.setUnnamedAddr(.True);
-        global.setAlignment(tv.ty.abiAlignment(target));
+        global.setAlignment(tv.ty.abiAlignment(mod));
         const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
             global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace))
         else
@@ -4775,7 +4782,8 @@
         const extra = self.air.extraData(Air.Call, pl_op.payload);
         const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
         const callee_ty = self.air.typeOf(pl_op.operand);
-        const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
+        const mod = self.dg.module;
+        const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
            .Fn => callee_ty,
            .Pointer => callee_ty.childType(),
            else => unreachable,
@@ -4783,20 +4791,20 @@
         const fn_info = zig_fn_ty.fnInfo();
         const return_type = fn_info.return_type;
         const llvm_fn = try self.resolveInst(pl_op.operand);
-        const target = self.dg.module.getTarget();
-        const sret = firstParamSRet(fn_info, target);
+        const target = mod.getTarget();
+        const sret = firstParamSRet(fn_info, mod);

         var llvm_args = std.ArrayList(*llvm.Value).init(self.gpa);
         defer llvm_args.deinit();

         const ret_ptr = if (!sret) null else blk: {
             const llvm_ret_ty = try self.dg.lowerType(return_type);
-            const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(target));
+            const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod));
             try llvm_args.append(ret_ptr);
             break :blk ret_ptr;
         };

-        const err_return_tracing = fn_info.return_type.isError() and
+        const err_return_tracing = fn_info.return_type.isError(mod) and
             self.dg.module.comp.bin_file.options.error_return_tracing;
         if (err_return_tracing) {
             try llvm_args.append(self.err_ret_trace.?);
@@ -4810,8 +4818,8 @@
                 const param_ty = self.air.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
                 const llvm_param_ty = try self.dg.lowerType(param_ty);
-                if (isByRef(param_ty)) {
-                    const alignment = param_ty.abiAlignment(target);
+                if (isByRef(param_ty, mod)) {
+                    const alignment = param_ty.abiAlignment(mod);
                     const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, "");
                     load_inst.setAlignment(alignment);
                     try llvm_args.append(load_inst);
@@ -4823,10 +4831,10 @@
                 const arg = args[it.zig_index - 1];
                 const param_ty = self.air.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
-                if (isByRef(param_ty)) {
+                if (isByRef(param_ty, mod)) {
                     try llvm_args.append(llvm_arg);
                 } else {
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                     const param_llvm_ty = llvm_arg.typeOf();
                     const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
                     const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
@@ -4839,10 +4847,10 @@
                 const param_ty = self.air.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);

-                const alignment = param_ty.abiAlignment(target);
+                const alignment = param_ty.abiAlignment(mod);
                 const param_llvm_ty = try self.dg.lowerType(param_ty);
                 const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
-                if (isByRef(param_ty)) {
+                if (isByRef(param_ty, mod)) {
                     const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, "");
                     load_inst.setAlignment(alignment);
@@ -4859,11 +4867,11 @@
                 const arg = args[it.zig_index - 1];
                 const param_ty = self.air.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
-                const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+                const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
                 const int_llvm_ty = self.context.intType(abi_size * 8);

-                if (isByRef(param_ty)) {
-                    const alignment = param_ty.abiAlignment(target);
+                if (isByRef(param_ty, mod)) {
+                    const alignment = param_ty.abiAlignment(mod);
                     const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, "");
                     load_inst.setAlignment(alignment);
                     try llvm_args.append(load_inst);
@@ -4871,7 +4879,7 @@
                     // LLVM does not allow bitcasting structs so we must allocate
                     // a local, store as one type, and then load as another type.
                     const alignment = @max(
-                        param_ty.abiAlignment(target),
+                        param_ty.abiAlignment(mod),
                         self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
                     );
                     const int_ptr = self.buildAlloca(int_llvm_ty, alignment);
@@ -4896,11 +4904,11 @@
                 const param_ty = self.air.typeOf(arg);
                 const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
                 const llvm_arg = try self.resolveInst(arg);
-                const is_by_ref = isByRef(param_ty);
+                const is_by_ref = isByRef(param_ty, mod);
                 const arg_ptr = if (is_by_ref) llvm_arg else p: {
                     const p = self.buildAlloca(llvm_arg.typeOf(), null);
                     const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(param_ty.abiAlignment(target));
+                    store_inst.setAlignment(param_ty.abiAlignment(mod));
                     break :p p;
                 };
@@ -4924,17 +4932,17 @@
                 const arg = args[it.zig_index - 1];
                 const arg_ty = self.air.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
-                if (!isByRef(arg_ty)) {
+                if (!isByRef(arg_ty, mod)) {
                     const p = self.buildAlloca(llvm_arg.typeOf(), null);
                     const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(arg_ty.abiAlignment(target));
+                    store_inst.setAlignment(arg_ty.abiAlignment(mod));
                     llvm_arg = store_inst;
                 }

-                const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?);
+                const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?);
                 const array_llvm_ty = float_ty.arrayType(count);

-                const alignment = arg_ty.abiAlignment(target);
+                const alignment = arg_ty.abiAlignment(mod);
                 const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
                 load_inst.setAlignment(alignment);
                 try llvm_args.append(load_inst);
@@ -4944,15 +4952,15 @@
                 const arg = args[it.zig_index - 1];
                 const arg_ty = self.air.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
-                if (!isByRef(arg_ty)) {
+                if (!isByRef(arg_ty, mod)) {
                     const p = self.buildAlloca(llvm_arg.typeOf(), null);
                     const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(arg_ty.abiAlignment(target));
+                    store_inst.setAlignment(arg_ty.abiAlignment(mod));
                     llvm_arg = store_inst;
                 }

                 const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len);
-                const alignment = arg_ty.abiAlignment(target);
+                const alignment = arg_ty.abiAlignment(mod);
                 const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
                 load_inst.setAlignment(alignment);
                 try llvm_args.append(load_inst);
@@ -4969,7 +4977,7 @@ pub const FuncGen = struct {
             "",
         );

-        if (callee_ty.zigTypeTag() == .Pointer) {
+        if (callee_ty.zigTypeTag(mod) == .Pointer) {
             // Add argument attributes for function pointer calls.
             it = iterateParamTypes(self.dg, fn_info);
             it.llvm_index += @boolToInt(sret);
@@ -4978,7 +4986,7 @@
                 .byval => {
                     const param_index = it.zig_index - 1;
                     const param_ty = fn_info.param_types[param_index];
-                    if (!isByRef(param_ty)) {
+                    if (!isByRef(param_ty, mod)) {
                         self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
                     }
                 },
@@ -4986,7 +4994,7 @@
                     const param_index = it.zig_index - 1;
                     const param_ty = fn_info.param_types[param_index];
                     const param_llvm_ty = try self.dg.lowerType(param_ty);
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                     self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
                 },
                 .byref_mut => {
@@ -5013,7 +5021,7 @@
                             self.dg.addArgAttr(call, llvm_arg_i, "noalias");
                         }
                     }
-                    if (param_ty.zigTypeTag() != .Optional) {
+                    if (param_ty.zigTypeTag(mod) != .Optional) {
                         self.dg.addArgAttr(call, llvm_arg_i, "nonnull");
                     }
                     if (!ptr_info.mutable) {
@@ -5022,7 +5030,7 @@
                     if (ptr_info.@"align" != 0) {
                         self.dg.addArgAttrInt(call, llvm_arg_i, "align", ptr_info.@"align");
                     } else {
-                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
                         self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
                     }
@@ -5033,7 +5041,7 @@
             return null;
         }

-        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
+        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
             return null;
         }
@@ -5041,12 +5049,12 @@
         if (ret_ptr) |rp| {
             call.setCallSret(llvm_ret_ty);
-            if (isByRef(return_type)) {
+            if (isByRef(return_type, mod)) {
                 return rp;
             } else {
                 // our by-ref status disagrees with sret so we must load.
                 const loaded = self.builder.buildLoad(llvm_ret_ty, rp, "");
-                loaded.setAlignment(return_type.abiAlignment(target));
+                loaded.setAlignment(return_type.abiAlignment(mod));
                 return loaded;
             }
         }
@@ -5061,7 +5069,7 @@
             const rp = self.buildAlloca(llvm_ret_ty, alignment);
             const store_inst = self.builder.buildStore(call, rp);
             store_inst.setAlignment(alignment);
-            if (isByRef(return_type)) {
+            if (isByRef(return_type, mod)) {
                 return rp;
             } else {
                 const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, "");
@@ -5070,10 +5078,10 @@
             }
         }

-        if (isByRef(return_type)) {
+        if (isByRef(return_type, mod)) {
             // our by-ref status disagrees with sret so we must allocate, store,
             // and return the allocation pointer.
-            const alignment = return_type.abiAlignment(target);
+            const alignment = return_type.abiAlignment(mod);
             const rp = self.buildAlloca(llvm_ret_ty, alignment);
             const store_inst = self.builder.buildStore(call, rp);
             store_inst.setAlignment(alignment);
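isByRef, like the layout queries, now takes the Module. When its answer disagrees with how the ABI wants a value passed, the code above bridges the two with a stack slot. A condensed sketch of that bridge, assuming the FuncGen context and an argument llvm_arg of Zig type param_ty:

    if (isByRef(param_ty, mod)) {
        // Already a pointer: pass it through (or load, if the ABI wants a value).
        try llvm_args.append(llvm_arg);
    } else {
        // A value, but the ABI wants a pointer: spill to an alloca first.
        const alignment = param_ty.abiAlignment(mod);
        const arg_ptr = self.buildAlloca(llvm_arg.typeOf(), alignment);
        const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
        store_inst.setAlignment(alignment);
        try llvm_args.append(arg_ptr);
    }
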
@@ -5084,6 +5092,7 @@
     }

     fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const ret_ty = self.air.typeOf(un_op);
         if (self.ret_ptr) |ret_ptr| {
@@ -5098,8 +5107,8 @@
             return null;
         }
         const fn_info = self.dg.decl.ty.fnInfo();
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
-            if (fn_info.return_type.isError()) {
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+            if (fn_info.return_type.isError(mod)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
@@ -5113,10 +5122,9 @@
         const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
         const operand = try self.resolveInst(un_op);
-        const target = self.dg.module.getTarget();
-        const alignment = ret_ty.abiAlignment(target);
+        const alignment = ret_ty.abiAlignment(mod);

-        if (isByRef(ret_ty)) {
+        if (isByRef(ret_ty, mod)) {
             // operand is a pointer however self.ret_ptr is null so that means
             // we need to return a value.
             const load_inst = self.builder.buildLoad(abi_ret_ty, operand, "");
@@ -5145,8 +5153,9 @@
         const ptr_ty = self.air.typeOf(un_op);
         const ret_ty = ptr_ty.childType();
         const fn_info = self.dg.decl.ty.fnInfo();
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
-            if (fn_info.return_type.isError()) {
+        const mod = self.dg.module;
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+            if (fn_info.return_type.isError(mod)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
@@ -5162,10 +5171,9 @@ pub const FuncGen = struct {
             return null;
         }
         const ptr = try self.resolveInst(un_op);
-        const target = self.dg.module.getTarget();
         const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
         const loaded = self.builder.buildLoad(abi_ret_ty, ptr, "");
-        loaded.setAlignment(ret_ty.abiAlignment(target));
+        loaded.setAlignment(ret_ty.abiAlignment(mod));
         _ = self.builder.buildRet(loaded);
         return null;
     }
@@ -5184,9 +5192,9 @@
         const src_list = try self.resolveInst(ty_op.operand);
         const va_list_ty = self.air.getRefType(ty_op.ty);
         const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+        const mod = self.dg.module;

-        const target = self.dg.module.getTarget();
-        const result_alignment = va_list_ty.abiAlignment(target);
+        const result_alignment = va_list_ty.abiAlignment(mod);
         const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment);

         const llvm_fn_name = "llvm.va_copy";
@@ -5202,7 +5210,7 @@
         const args: [2]*llvm.Value = .{ dest_list, src_list };
         _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");

-        if (isByRef(va_list_ty)) {
+        if (isByRef(va_list_ty, mod)) {
             return dest_list;
         } else {
             const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, "");
@@ -5227,11 +5235,11 @@
     }

     fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const va_list_ty = self.air.typeOfIndex(inst);
         const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);

-        const target = self.dg.module.getTarget();
-        const result_alignment = va_list_ty.abiAlignment(target);
+        const result_alignment = va_list_ty.abiAlignment(mod);
         const list = self.buildAlloca(llvm_va_list_ty, result_alignment);

         const llvm_fn_name = "llvm.va_start";
@@ -5243,7 +5251,7 @@
         const args: [1]*llvm.Value = .{list};
         _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");

-        if (isByRef(va_list_ty)) {
+        if (isByRef(va_list_ty, mod)) {
             return list;
         } else {
             const loaded = self.builder.buildLoad(llvm_va_list_ty, list, "");
@@ -5292,23 +5300,23 @@
         operand_ty: Type,
         op: math.CompareOperator,
     ) Allocator.Error!*llvm.Value {
-        var int_buffer: Type.Payload.Bits = undefined;
         var opt_buffer: Type.Payload.ElemType = undefined;
-        const scalar_ty = operand_ty.scalarType();
-        const int_ty = switch (scalar_ty.zigTypeTag()) {
-            .Enum => scalar_ty.intTagType(&int_buffer),
+        const mod = self.dg.module;
+        const scalar_ty = operand_ty.scalarType(mod);
+        const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
+            .Enum => scalar_ty.intTagType(),
            .Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
            .Optional => blk: {
                 const payload_ty = operand_ty.optionalChild(&opt_buffer);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
-                    operand_ty.optionalReprIsPayload())
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+                    operand_ty.optionalReprIsPayload(mod))
                 {
                     break :blk operand_ty;
                 }
                 // We need to emit instructions to check for equality/inequality
                 // of optionals that are not pointers.
-                const is_by_ref = isByRef(scalar_ty);
+                const is_by_ref = isByRef(scalar_ty, mod);
                 const opt_llvm_ty = try self.dg.lowerType(scalar_ty);
                 const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref);
                 const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref);
@@ -5375,7 +5383,7 @@
            .Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }),
            else => unreachable,
         };
-        const is_signed = int_ty.isSignedInt();
+        const is_signed = int_ty.isSignedInt(mod);
         const operation: llvm.IntPredicate = switch (op) {
            .eq => .EQ,
            .neq => .NE,
@@ -5393,6 +5401,7 @@
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
         const inst_ty = self.air.typeOfIndex(inst);
         const parent_bb = self.context.createBasicBlock("Block");
+        const mod = self.dg.module;

         if (inst_ty.isNoReturn()) {
             try self.genBody(body);
@@ -5414,8 +5423,8 @@
         self.builder.positionBuilderAtEnd(parent_bb);

         // Create a phi node only if the block returns a value.
-        const is_body = inst_ty.zigTypeTag() == .Fn;
-        if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null;
+        const is_body = inst_ty.zigTypeTag(mod) == .Fn;
+        if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;

         const raw_llvm_ty = try self.dg.lowerType(inst_ty);

@@ -5424,7 +5433,7 @@
             // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
             // of function pointers, however the phi makes it a runtime value and therefore
             // the LLVM type has to be wrapped in a pointer.
-            if (is_body or isByRef(inst_ty)) {
+            if (is_body or isByRef(inst_ty, mod)) {
                 break :ty self.context.pointerType(0);
             }
             break :ty raw_llvm_ty;
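The airBlock change above keeps the existing rule for what type the break phi carries; only the queries gained a mod parameter. A recap of that rule, assuming inst_ty is the block's result type in the FuncGen context:

    // Function bodies and by-ref aggregates flow through the phi as
    // pointers; everything else uses its lowered LLVM type directly.
    const is_body = inst_ty.zigTypeTag(mod) == .Fn;
    const phi_llvm_ty = if (is_body or isByRef(inst_ty, mod))
        self.context.pointerType(0)
    else
        try self.dg.lowerType(inst_ty);
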
@@ -5445,7 +5454,8 @@
         // Add the values to the lists only if the break provides a value.
         const operand_ty = self.air.typeOf(branch.operand);
-        if (operand_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.zigTypeTag() == .Fn) {
+        const mod = self.dg.module;
+        if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) {
             const val = try self.resolveInst(branch.operand);

             // For the phi node, we need the basic blocks and the values of the
@@ -5481,6 +5491,7 @@
     }

     fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const err_union = try self.resolveInst(pl_op.operand);
@@ -5488,7 +5499,7 @@
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
         const err_union_ty = self.air.typeOf(pl_op.operand);
         const payload_ty = self.air.typeOfIndex(inst);
-        const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+        const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
         const is_unused = self.liveness.isUnused(inst);
         return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
     }
@@ -5512,9 +5523,9 @@
         can_elide_load: bool,
         is_unused: bool,
     ) !?*llvm.Value {
+        const mod = fg.dg.module;
         const payload_ty = err_union_ty.errorUnionPayload();
-        const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
-        const target = fg.dg.module.getTarget();
+        const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
         const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty);

         if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
@@ -5529,8 +5540,8 @@
                     err_union;
                 break :err fg.builder.buildICmp(.NE, loaded, zero, "");
             }
-            const err_field_index = errUnionErrorOffset(payload_ty, target);
-            if (operand_is_ptr or isByRef(err_union_ty)) {
+            const err_field_index = errUnionErrorOffset(payload_ty, mod);
+            if (operand_is_ptr or isByRef(err_union_ty, mod)) {
                 const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, "");
                 // TODO add alignment to this load
                 const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -5555,30 +5566,31 @@
         if (!payload_has_bits) {
             return if (operand_is_ptr) err_union else null;
         }
-        const offset = errUnionPayloadOffset(payload_ty, target);
+        const offset = errUnionPayloadOffset(payload_ty, mod);
         if (operand_is_ptr) {
             return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
-        } else if (isByRef(err_union_ty)) {
+        } else if (isByRef(err_union_ty, mod)) {
             const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
-            if (isByRef(payload_ty)) {
+            if (isByRef(payload_ty, mod)) {
                 if (can_elide_load)
                     return payload_ptr;

-                return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+                return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
             }
             const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
-            load_inst.setAlignment(payload_ty.abiAlignment(target));
+            load_inst.setAlignment(payload_ty.abiAlignment(mod));
             return load_inst;
         }
         return fg.builder.buildExtractValue(err_union, offset, "");
     }

     fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const cond = try self.resolveInst(pl_op.operand);
         const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
         const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
         const llvm_usize = self.context.intType(target.ptrBitWidth());
         const cond_int = if (cond.typeOf().getTypeKind() == .Pointer)
             self.builder.buildPtrToInt(cond, llvm_usize, "")
@@ -5645,6 +5657,7 @@
     }

     fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_ty = self.air.typeOf(ty_op.operand);
         const array_ty = operand_ty.childType();
@@ -5652,7 +5665,7 @@
         const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
         const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
         const operand = try self.resolveInst(ty_op.operand);
-        if (!array_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, "");
             return self.builder.buildInsertValue(partial, len, 1, "");
         }
@@ -5666,30 +5679,31 @@
     }

     fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
-        const operand_scalar_ty = operand_ty.scalarType();
+        const operand_scalar_ty = operand_ty.scalarType(mod);
         const dest_ty = self.air.typeOfIndex(inst);
-        const dest_scalar_ty = dest_ty.scalarType();
+        const dest_scalar_ty = dest_ty.scalarType(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();

         if (intrinsicsAllowed(dest_scalar_ty, target)) {
-            if (operand_scalar_ty.isSignedInt()) {
+            if (operand_scalar_ty.isSignedInt(mod)) {
                 return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
             } else {
                 return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
             }
         }

-        const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target));
+        const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod));
         const rt_int_bits = compilerRtIntBits(operand_bits);
         const rt_int_ty = self.context.intType(rt_int_bits);
         var extended = e: {
-            if (operand_scalar_ty.isSignedInt()) {
+            if (operand_scalar_ty.isSignedInt(mod)) {
                 break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, "");
             } else {
                 break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, "");
@@ -5698,7 +5712,7 @@
         const dest_bits = dest_scalar_ty.floatBits(target);
         const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits);
         const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits);
-        const sign_prefix = if (operand_scalar_ty.isSignedInt()) "" else "un";
+        const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else "un";
         var fn_name_buf: [64]u8 = undefined;
         const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{
             sign_prefix,
@@ -5724,27 +5738,28 @@
     fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);

-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
-        const operand_scalar_ty = operand_ty.scalarType();
+        const operand_scalar_ty = operand_ty.scalarType(mod);
         const dest_ty = self.air.typeOfIndex(inst);
-        const dest_scalar_ty = dest_ty.scalarType();
+        const dest_scalar_ty = dest_ty.scalarType(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);

         if (intrinsicsAllowed(operand_scalar_ty, target)) {
             // TODO set fast math flag
-            if (dest_scalar_ty.isSignedInt()) {
+            if (dest_scalar_ty.isSignedInt(mod)) {
                 return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
             } else {
                 return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
             }
         }

-        const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(target)));
+        const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod)));
         const ret_ty = self.context.intType(rt_int_bits);
         const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
             // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -5756,7 +5771,7 @@
         const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
         const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits);
-        const sign_prefix = if (dest_scalar_ty.isSignedInt()) "" else "uns";
+        const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns";

         var fn_name_buf: [64]u8 = undefined;
         const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{
@@ -5786,13 +5801,14 @@
     }

     fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
-        const target = fg.dg.module.getTarget();
+        const mod = fg.dg.module;
+        const target = mod.getTarget();
         const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
         switch (ty.ptrSize()) {
            .Slice => {
                 const len = fg.builder.buildExtractValue(ptr, 1, "");
                 const elem_ty = ty.childType();
-                const abi_size = elem_ty.abiSize(target);
+                const abi_size = elem_ty.abiSize(mod);
                 if (abi_size == 1) return len;
                 const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
                 return fg.builder.buildMul(len, abi_size_llvm_val, "");
@@ -5800,7 +5816,7 @@
            .One => {
                 const array_ty = ty.childType();
                 const elem_ty = array_ty.childType();
-                const abi_size = elem_ty.abiSize(target);
+                const abi_size = elem_ty.abiSize(mod);
                 return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False);
            },
            .Many, .C => unreachable,
@@ -5823,6 +5839,7 @@
     }

     fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -5833,12 +5850,11 @@
         const base_ptr = self.builder.buildExtractValue(slice, 0, "");
         const indices: [1]*llvm.Value = .{index};
         const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
-        if (isByRef(elem_ty)) {
+        if (isByRef(elem_ty, mod)) {
             if (self.canElideLoad(body_tail))
                 return ptr;

-            const target = self.dg.module.getTarget();
-            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
         }

         return self.load(ptr, slice_ty);
@@ -5858,6 +5874,7 @@
     }

     fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -5866,15 +5883,14 @@
         const rhs = try self.resolveInst(bin_op.rhs);
         const array_llvm_ty = try self.dg.lowerType(array_ty);
         const elem_ty = array_ty.childType();
-        if (isByRef(array_ty)) {
+        if (isByRef(array_ty, mod)) {
             const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
-            if (isByRef(elem_ty)) {
+            if (isByRef(elem_ty, mod)) {
                 const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, "");
                 if (canElideLoad(self, body_tail))
                     return elem_ptr;

-                const target = self.dg.module.getTarget();
-                return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(target), false);
+                return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false);
             } else {
                 const lhs_index = Air.refToIndex(bin_op.lhs).?;
                 const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -5901,6 +5917,7 @@
     }

     fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const ptr_ty = self.air.typeOf(bin_op.lhs);
@@ -5917,23 +5934,23 @@
             const indices: [1]*llvm.Value = .{rhs};
             break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
         };
-        if (isByRef(elem_ty)) {
+        if (isByRef(elem_ty, mod)) {
             if (self.canElideLoad(body_tail))
                 return ptr;

-            const target = self.dg.module.getTarget();
-            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
         }

         return self.load(ptr, ptr_ty);
     }

     fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr_ty = self.air.typeOf(bin_op.lhs);
         const elem_ty = ptr_ty.childType();
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);

         const base_ptr = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
@@ -5972,6 +5989,7 @@
     }

     fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -5979,29 +5997,28 @@
         const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
         const field_index = struct_field.field_index;
         const field_ty = struct_ty.structFieldType(field_index);
-        if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return null;
         }

-        const target = self.dg.module.getTarget();
-        if (!isByRef(struct_ty)) {
-            assert(!isByRef(field_ty));
-            switch (struct_ty.zigTypeTag()) {
+        if (!isByRef(struct_ty, mod)) {
+            assert(!isByRef(field_ty, mod));
+            switch (struct_ty.zigTypeTag(mod)) {
                .Struct => switch (struct_ty.containerLayout()) {
                    .Packed => {
                         const struct_obj = struct_ty.castTag(.@"struct").?.data;
-                        const bit_offset = struct_obj.packedFieldBitOffset(target, field_index);
+                        const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
                         const containing_int = struct_llvm_val;
                         const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
                         const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
                         const elem_llvm_ty = try self.dg.lowerType(field_ty);
-                        if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
-                            const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                        if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+                            const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                             const same_size_int = self.context.intType(elem_bits);
                             const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                             return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
-                        } else if (field_ty.isPtrAtRuntime()) {
-                            const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                        } else if (field_ty.isPtrAtRuntime(mod)) {
+                            const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                             const same_size_int = self.context.intType(elem_bits);
                             const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                             return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6010,7 +6027,7 @@
                    },
                    else => {
                         var ptr_ty_buf: Type.Payload.Pointer = undefined;
-                        const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+                        const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
                         return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
                    },
                },
@@ -6018,13 +6035,13 @@
                     assert(struct_ty.containerLayout() == .Packed);
                     const containing_int = struct_llvm_val;
                     const elem_llvm_ty = try self.dg.lowerType(field_ty);
-                    if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
-                        const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                    if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+                        const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                         const same_size_int = self.context.intType(elem_bits);
                         const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
                         return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
-                    } else if (field_ty.isPtrAtRuntime()) {
-                        const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                    } else if (field_ty.isPtrAtRuntime(mod)) {
+                        const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                         const same_size_int = self.context.intType(elem_bits);
                         const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
                         return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6035,30 +6052,30 @@
             }
         }

-        switch (struct_ty.zigTypeTag()) {
+        switch (struct_ty.zigTypeTag(mod)) {
            .Struct => {
                 assert(struct_ty.containerLayout() != .Packed);
                 var ptr_ty_buf: Type.Payload.Pointer = undefined;
-                const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+                const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
                 const struct_llvm_ty = try self.dg.lowerType(struct_ty);
                 const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
                 const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
-                if (isByRef(field_ty)) {
+                if (isByRef(field_ty, mod)) {
                     if (canElideLoad(self, body_tail))
                         return field_ptr;

-                    return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(target), false);
+                    return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(mod), false);
                 } else {
                     return self.load(field_ptr, field_ptr_ty);
                 }
            },
            .Union => {
                 const union_llvm_ty = try self.dg.lowerType(struct_ty);
-                const layout = struct_ty.unionGetLayout(target);
+                const layout = struct_ty.unionGetLayout(mod);
                 const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
                 const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, "");
                 const llvm_field_ty = try self.dg.lowerType(field_ty);
-                if (isByRef(field_ty)) {
+                if (isByRef(field_ty, mod)) {
                     if (canElideLoad(self, body_tail))
                         return field_ptr;
@@ -6072,6 +6089,7 @@
     }

     fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@@ -6079,7 +6097,7 @@
         const target = self.dg.module.getTarget();
         const parent_ty = self.air.getRefType(ty_pl.ty).childType();
-        const field_offset = parent_ty.structFieldOffset(extra.field_index, target);
+        const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);

         const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
         if (field_offset == 0) {
@@ -6119,12 +6137,13 @@
     }

     fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const dib = self.dg.object.di_builder orelse return null;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
         const decl_index = func.owner_decl;
-        const decl = self.dg.module.declPtr(decl_index);
+        const decl = mod.declPtr(decl_index);
         const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope);
         self.di_file = di_file;
         const line_number = decl.src_line + 1;
@@ -6136,22 +6155,41 @@
            .base_line = self.base_line,
         });

-        const fqn = try decl.getFullyQualifiedName(self.dg.module);
+        const fqn = try decl.getFullyQualifiedName(mod);
         defer self.gpa.free(fqn);

-        const is_internal_linkage = !self.dg.module.decl_exports.contains(decl_index);
+        const is_internal_linkage = !mod.decl_exports.contains(decl_index);
+        var fn_ty_pl: Type.Payload.Function = .{
+            .base = .{ .tag = .function },
+            .data = .{
+                .param_types = &.{},
+                .comptime_params = undefined,
+                .return_type = Type.void,
+                .alignment = 0,
+                .noalias_bits = 0,
+                .cc = .Unspecified,
+                .is_var_args = false,
+                .is_generic = false,
+                .is_noinline = false,
+                .align_is_generic = false,
+                .cc_is_generic = false,
+                .section_is_generic = false,
+                .addrspace_is_generic = false,
+            },
+        };
+        const fn_ty = Type.initPayload(&fn_ty_pl.base);
         const subprogram = dib.createFunction(
             di_file.toScope(),
             decl.name,
             fqn,
             di_file,
             line_number,
-            try self.dg.object.lowerDebugType(Type.initTag(.fn_void_no_args), .full),
+            try self.dg.object.lowerDebugType(fn_ty, .full),
             is_internal_linkage,
             true, // is definition
             line_number + func.lbrace_line, // scope line
             llvm.DIFlags.StaticMember,
-            self.dg.module.comp.bin_file.options.optimize_mode != .Debug,
+            mod.comp.bin_file.options.optimize_mode != .Debug,
             null, // decl_subprogram
         );
@@ -6243,10 +6281,11 @@
             null;
         const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
         const insert_block = self.builder.getInsertBlock();
-        if (isByRef(operand_ty)) {
+        const mod = self.dg.module;
+        if (isByRef(operand_ty, mod)) {
             _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block);
         } else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
-            const alignment = operand_ty.abiAlignment(self.dg.module.getTarget());
+            const alignment = operand_ty.abiAlignment(mod);
             const alloca = self.buildAlloca(operand.typeOf(), alignment);
             const store_inst = self.builder.buildStore(operand, alloca);
             store_inst.setAlignment(alignment);
@@ -6294,7 +6333,8 @@
         // This stores whether we need to add an elementtype attribute and
         // if so, the element type itself.
         const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count);
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();

         var llvm_ret_i: usize = 0;
         var llvm_param_i: usize = 0;
@@ -6322,7 +6362,7 @@
             if (output != .none) {
                 const output_inst = try self.resolveInst(output);
                 const output_ty = self.air.typeOf(output);
-                assert(output_ty.zigTypeTag() == .Pointer);
+                assert(output_ty.zigTypeTag(mod) == .Pointer);
                 const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType());

                 if (llvm_ret_indirect[i]) {
@@ -6376,13 +6416,13 @@
             const arg_llvm_value = try self.resolveInst(input);
             const arg_ty = self.air.typeOf(input);
             var llvm_elem_ty: ?*llvm.Type = null;
-            if (isByRef(arg_ty)) {
+            if (isByRef(arg_ty, mod)) {
                 llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty);
                 if (constraintAllowsMemory(constraint)) {
                     llvm_param_values[llvm_param_i] = arg_llvm_value;
                     llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
                 } else {
-                    const alignment = arg_ty.abiAlignment(target);
+                    const alignment = arg_ty.abiAlignment(mod);
                     const arg_llvm_ty = try self.dg.lowerType(arg_ty);
                     const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, "");
                     load_inst.setAlignment(alignment);
@@ -6394,7 +6434,7 @@
                 llvm_param_values[llvm_param_i] = arg_llvm_value;
                 llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
             } else {
-                const alignment = arg_ty.abiAlignment(target);
+                const alignment = arg_ty.abiAlignment(mod);
                 const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment);
                 const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr);
                 store_inst.setAlignment(alignment);
@@ -6599,7 +6639,7 @@
                 const output_ptr_ty = self.air.typeOf(output);

                 const store_inst = self.builder.buildStore(output_value, output_ptr);
-                store_inst.setAlignment(output_ptr_ty.ptrAlignment(target));
+                store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod));
             } else {
                 ret_val = output_value;
             }
@@ -6622,7 +6662,8 @@
         const optional_llvm_ty = try self.dg.lowerType(optional_ty);
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
-        if (optional_ty.optionalReprIsPayload()) {
+        const mod = self.dg.module;
+        if (optional_ty.optionalReprIsPayload(mod)) {
             const loaded = if (operand_is_ptr)
                 self.builder.buildLoad(optional_llvm_ty, operand, "")
             else
@@ -6638,7 +6679,7 @@
         comptime assert(optional_layout_version == 3);

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             const loaded = if (operand_is_ptr)
                 self.builder.buildLoad(optional_llvm_ty, operand, "")
             else
                 operand;
             return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), "");
         }

-        const is_by_ref = operand_is_ptr or isByRef(optional_ty);
+        const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod);
         const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref);
         if (pred == .EQ) {
             return self.builder.buildNot(non_null_bit, "");
@@ -6662,6 +6703,7 @@
         op: llvm.IntPredicate,
         operand_is_ptr: bool,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
         const operand_ty = self.air.typeOf(un_op);
@@ -6679,7 +6721,7 @@
             }
         }

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             const loaded = if (operand_is_ptr)
                 self.builder.buildLoad(try self.dg.lowerType(err_union_ty), operand, "")
             else
                 operand;
             return self.builder.buildICmp(op, loaded, zero, "");
         }

-        const target = self.dg.module.getTarget();
-        const err_field_index = errUnionErrorOffset(payload_ty, target);
+        const err_field_index = errUnionErrorOffset(payload_ty, mod);

-        if (operand_is_ptr or isByRef(err_union_ty)) {
+        if (operand_is_ptr or isByRef(err_union_ty, mod)) {
             const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
             const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, "");
             const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -6702,17 +6743,18 @@
     }

     fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.air.typeOf(ty_op.operand).childType();
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             // We have a pointer to a zero-bit value and we need to return
             // a pointer to a zero-bit value.
             return operand;
         }
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             // The payload and the optional are the same value.
             return operand;
         }
@@ -6723,18 +6765,19 @@
     fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         comptime assert(optional_layout_version == 3);

+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.air.typeOf(ty_op.operand).childType();
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
         const non_null_bit = self.context.intType(8).constInt(1, .False);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             // We have a pointer to a i8. We need to set it to 1 and then return the same pointer.
             _ = self.builder.buildStore(non_null_bit, operand);
             return operand;
         }
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             // The payload and the optional are the same value.
             // Setting to non-null will be done when the payload is set.
             return operand;
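The optional handlers above and below all branch on the same two predicates, now Module-aware. A recap of the three representations they distinguish (assuming optional_layout_version == 3, as asserted above):

    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
        // Zero-bit payload: the optional is just the i8 non-null flag.
    } else if (optional_ty.optionalReprIsPayload(mod)) {
        // e.g. ?*T: null is the all-zero payload, so optional == payload.
    } else {
        // General case: { payload, i8 non-null flag } aggregate.
    }
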
@@ -6754,20 +6797,21 @@
     }

     fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.air.typeOf(ty_op.operand);
         const payload_ty = self.air.typeOfIndex(inst);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;

-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             // Payload value is the same as the optional value.
             return operand;
         }

         const opt_llvm_ty = try self.dg.lowerType(optional_ty);
-        const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+        const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
         return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
     }

@@ -6776,6 +6820,7 @@
         body_tail: []const Air.Inst.Index,
         operand_is_ptr: bool,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
@@ -6783,25 +6828,24 @@
         const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
         const result_ty = self.air.typeOfIndex(inst);
         const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty;
-        const target = self.dg.module.getTarget();

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return if (operand_is_ptr) operand else null;
         }

-        const offset = errUnionPayloadOffset(payload_ty, target);
+        const offset = errUnionPayloadOffset(payload_ty, mod);
         const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
         if (operand_is_ptr) {
             return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
-        } else if (isByRef(err_union_ty)) {
+        } else if (isByRef(err_union_ty, mod)) {
             const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
-            if (isByRef(payload_ty)) {
+            if (isByRef(payload_ty, mod)) {
                 if (self.canElideLoad(body_tail))
                     return payload_ptr;

-                return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+                return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
             }
             const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
-            load_inst.setAlignment(payload_ty.abiAlignment(target));
+            load_inst.setAlignment(payload_ty.abiAlignment(mod));
             return load_inst;
         }
         return self.builder.buildExtractValue(operand, offset, "");
@@ -6812,6 +6856,7 @@
         inst: Air.Inst.Index,
         operand_is_ptr: bool,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
@@ -6828,15 +6873,14 @@
         const err_set_llvm_ty = try self.dg.lowerType(Type.anyerror);

         const payload_ty = err_union_ty.errorUnionPayload();
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             if (!operand_is_ptr) return operand;
             return self.builder.buildLoad(err_set_llvm_ty, operand, "");
         }

-        const target = self.dg.module.getTarget();
self.dg.module.getTarget(); - const offset = errUnionErrorOffset(payload_ty, target); + const offset = errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty)) { + if (operand_is_ptr or isByRef(err_union_ty, mod)) { const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, ""); @@ -6846,30 +6890,30 @@ pub const FuncGen = struct { } fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const err_union_ty = self.air.typeOf(ty_op.operand).childType(); const payload_ty = err_union_ty.errorUnionPayload(); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); return operand; } - const target = self.dg.module.getTarget(); const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); { - const error_offset = errUnionErrorOffset(payload_ty, target); + const error_offset = errUnionErrorOffset(payload_ty, mod); // First set the non-error value. const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, ""); const store_inst = self.builder.buildStore(non_error_val, non_null_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); } // Then return the payload pointer (only if it is used). if (self.liveness.isUnused(inst)) return null; - const payload_offset = errUnionPayloadOffset(payload_ty, target); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, ""); } @@ -6885,15 +6929,14 @@ pub const FuncGen = struct { } fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const target = self.dg.module.getTarget(); - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; //const struct_ty = try self.resolveInst(ty_pl.ty); const struct_ty = self.air.getRefType(ty_pl.ty); const field_index = ty_pl.payload; var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?; + const mod = self.dg.module; + const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, ""); const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); @@ -6901,20 +6944,20 @@ pub const FuncGen = struct { } fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); const non_null_bit = self.context.intType(8).constInt(1, .False); comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return non_null_bit; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.air.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload()) 
{ + if (optional_ty.optionalReprIsPayload(mod)) { return operand; } const llvm_optional_ty = try self.dg.lowerType(optional_ty); - if (isByRef(optional_ty)) { - const target = self.dg.module.getTarget(); - const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(target)); + if (isByRef(optional_ty, mod)) { + const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -6931,24 +6974,24 @@ pub const FuncGen = struct { } fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_un_ty = self.air.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const payload_ty = self.air.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull(); const err_un_llvm_ty = try self.dg.lowerType(err_un_ty); - const target = self.dg.module.getTarget(); - const payload_offset = errUnionPayloadOffset(payload_ty, target); - const error_offset = errUnionErrorOffset(payload_ty, target); - if (isByRef(err_un_ty)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target)); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); + const error_offset = errUnionErrorOffset(payload_ty, mod); + if (isByRef(err_un_ty, mod)) { + const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); const store_inst = self.builder.buildStore(ok_err_code, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -6964,23 +7007,23 @@ pub const FuncGen = struct { } fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_un_ty = self.air.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } const err_un_llvm_ty = try self.dg.lowerType(err_un_ty); - const target = self.dg.module.getTarget(); - const payload_offset = errUnionPayloadOffset(payload_ty, target); - const error_offset = errUnionErrorOffset(payload_ty, target); - if (isByRef(err_un_ty)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target)); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); + const error_offset = errUnionErrorOffset(payload_ty, mod); + if (isByRef(err_un_ty, mod)) { + const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); const store_inst = self.builder.buildStore(operand, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + 
store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -7021,6 +7064,7 @@ pub const FuncGen = struct { } fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const data = self.air.instructions.items(.data)[inst].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; @@ -7032,8 +7076,7 @@ pub const FuncGen = struct { const loaded_vector = blk: { const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType()); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); - const target = self.dg.module.getTarget(); - load_inst.setAlignment(vector_ptr_ty.ptrAlignment(target)); + load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); break :blk load_inst; }; @@ -7043,24 +7086,26 @@ pub const FuncGen = struct { } fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(); + const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildSMin(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, ""); return self.builder.buildUMin(lhs, rhs, ""); } fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(); + const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildSMax(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, ""); return self.builder.buildUMax(lhs, rhs, ""); } @@ -7081,14 +7126,15 @@ pub const FuncGen = struct { fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, ""); return self.builder.buildNUWAdd(lhs, rhs, ""); } @@ -7103,14 +7149,15 @@ pub const FuncGen = struct { } fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = 
self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); - if (scalar_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, ""); return self.builder.buildUAddSat(lhs, rhs, ""); } @@ -7118,14 +7165,15 @@ pub const FuncGen = struct { fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, ""); return self.builder.buildNUWSub(lhs, rhs, ""); } @@ -7140,28 +7188,30 @@ pub const FuncGen = struct { } fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); - if (scalar_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, ""); return self.builder.buildUSubSat(lhs, rhs, ""); } fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, ""); return self.builder.buildNUWMul(lhs, rhs, ""); } @@ -7176,14 +7226,15 @@ pub const FuncGen = struct { } fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); - if (scalar_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, ""); return self.builder.buildUMulFixSat(lhs, rhs, ""); } @@ -7201,38 +7252,39 @@ pub const FuncGen = struct { fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { 
self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); return self.buildFloatOp(.trunc, inst_ty, 1, .{result}); } - if (scalar_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, ""); return self.builder.buildUDiv(lhs, rhs, ""); } fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); return self.buildFloatOp(.floor, inst_ty, 1, .{result}); } - if (scalar_ty.isSignedInt()) { - const target = self.dg.module.getTarget(); + if (scalar_ty.isSignedInt(mod)) { const inst_llvm_ty = try self.dg.lowerType(inst_ty); - const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1; - const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: { + const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; + const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -7258,40 +7310,43 @@ pub const FuncGen = struct { fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, ""); return self.builder.buildExactUDiv(lhs, rhs, ""); } fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, ""); return self.builder.buildURem(lhs, rhs, ""); } fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { 
self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); const inst_llvm_ty = try self.dg.lowerType(inst_ty); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); @@ -7301,10 +7356,9 @@ pub const FuncGen = struct { const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero }); return self.builder.buildSelect(ltz, c, a, ""); } - if (scalar_ty.isSignedInt()) { - const target = self.dg.module.getTarget(); - const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1; - const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: { + if (scalar_ty.isSignedInt(mod)) { + const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; + const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -7386,6 +7440,7 @@ pub const FuncGen = struct { signed_intrinsic: []const u8, unsigned_intrinsic: []const u8, ) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -7393,16 +7448,14 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); - const scalar_ty = lhs_ty.scalarType(); + const scalar_ty = lhs_ty.scalarType(mod); const dest_ty = self.air.typeOfIndex(inst); - const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic; + const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; const llvm_lhs_ty = try self.dg.lowerType(lhs_ty); const llvm_dest_ty = try self.dg.lowerType(dest_ty); - const tg = self.dg.module.getTarget(); - const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, ""); @@ -7410,12 +7463,11 @@ pub const FuncGen = struct { const overflow_bit = self.builder.buildExtractValue(result_struct, 1, ""); var ty_buf: Type.Payload.Pointer = undefined; - const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?; - const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?; + const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?; + const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?; - if (isByRef(dest_ty)) { - const target = self.dg.module.getTarget(); - const result_alignment = dest_ty.abiAlignment(target); + if (isByRef(dest_ty, mod)) { + const result_alignment = dest_ty.abiAlignment(mod); const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment); { const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); @@ -7486,8 +7538,9 @@ pub const FuncGen = struct { ty: Type, params: [2]*llvm.Value, ) !*llvm.Value { + const mod = self.dg.module; const target = self.dg.module.getTarget(); - const scalar_ty = ty.scalarType(); + const scalar_ty = ty.scalarType(mod); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); if (intrinsicsAllowed(scalar_ty, target)) { @@ -7531,7 +7584,7 @@ pub const FuncGen = struct { .gte => .SGE, }; - if (ty.zigTypeTag() == 
.Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(); const vector_result_ty = llvm_i32.vectorType(vec_len); @@ -7587,8 +7640,9 @@ pub const FuncGen = struct { comptime params_len: usize, params: [params_len]*llvm.Value, ) !*llvm.Value { - const target = self.dg.module.getTarget(); - const scalar_ty = ty.scalarType(); + const mod = self.dg.module; + const target = mod.getTarget(); + const scalar_ty = ty.scalarType(mod); const llvm_ty = try self.dg.lowerType(ty); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -7615,7 +7669,7 @@ pub const FuncGen = struct { const one = int_llvm_ty.constInt(1, .False); const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); const sign_mask = one.constShl(shift_amt); - const result = if (ty.zigTypeTag() == .Vector) blk: { + const result = if (ty.zigTypeTag(mod) == .Vector) blk: { const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, ""); const cast_ty = int_llvm_ty.vectorType(ty.vectorLen()); const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, ""); @@ -7662,7 +7716,7 @@ pub const FuncGen = struct { .libc => |fn_name| b: { const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty }; const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result = llvm_ty.getUndef(); return self.buildElementwiseCall(libc_fn, ¶ms, result, ty.vectorLen()); } @@ -7686,6 +7740,7 @@ pub const FuncGen = struct { } fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -7694,21 +7749,19 @@ pub const FuncGen = struct { const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); const dest_ty = self.air.typeOfIndex(inst); const llvm_dest_ty = try self.dg.lowerType(dest_ty); - const tg = self.dg.module.getTarget(); - - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; const result = self.builder.buildShl(lhs, casted_rhs, ""); - const reconstructed = if (lhs_scalar_ty.isSignedInt()) + const reconstructed = if (lhs_scalar_ty.isSignedInt(mod)) self.builder.buildAShr(result, casted_rhs, "") else self.builder.buildLShr(result, casted_rhs, ""); @@ -7716,12 +7769,11 @@ pub const FuncGen = struct { const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, ""); var ty_buf: Type.Payload.Pointer = undefined; - const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?; - const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?; + const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?; + const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?; - if (isByRef(dest_ty)) { - const target = self.dg.module.getTarget(); - const result_alignment = dest_ty.abiAlignment(target); + if (isByRef(dest_ty, mod)) { + const result_alignment = dest_ty.abiAlignment(mod); const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment); { const field_ptr = 
self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); @@ -7763,6 +7815,7 @@ pub const FuncGen = struct { } fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7770,20 +7823,19 @@ pub const FuncGen = struct { const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); - const tg = self.dg.module.getTarget(); - - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; - if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, ""); + if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, ""); return self.builder.buildNUWShl(lhs, casted_rhs, ""); } fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7791,12 +7843,10 @@ pub const FuncGen = struct { const lhs_type = self.air.typeOf(bin_op.lhs); const rhs_type = self.air.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_type.scalarType(); - const rhs_scalar_ty = rhs_type.scalarType(); - - const tg = self.dg.module.getTarget(); + const lhs_scalar_ty = lhs_type.scalarType(mod); + const rhs_scalar_ty = rhs_type.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "") else rhs; @@ -7804,6 +7854,7 @@ pub const FuncGen = struct { } fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7811,17 +7862,16 @@ pub const FuncGen = struct { const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const tg = self.dg.module.getTarget(); - const lhs_bits = lhs_scalar_ty.bitSize(tg); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const lhs_bits = lhs_scalar_ty.bitSize(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_bits) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits) self.builder.buildZExt(rhs, lhs.typeOf(), "") else rhs; - const result = if (lhs_scalar_ty.isSignedInt()) + const result = if (lhs_scalar_ty.isSignedInt(mod)) self.builder.buildSShlSat(lhs, casted_rhs, "") else self.builder.buildUShlSat(lhs, casted_rhs, ""); @@ -7834,7 +7884,7 @@ pub const FuncGen = struct { const lhs_scalar_llvm_ty = try self.dg.lowerType(lhs_scalar_ty); const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False); const lhs_max = lhs_scalar_llvm_ty.constAllOnes(); - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { const vec_len = rhs_ty.vectorLen(); const bits_vec = self.builder.buildVectorSplat(vec_len, bits, ""); const lhs_max_vec = 
self.builder.buildVectorSplat(vec_len, lhs_max, ""); @@ -7847,6 +7897,7 @@ pub const FuncGen = struct { } fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7854,16 +7905,14 @@ pub const FuncGen = struct { const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - - const tg = self.dg.module.getTarget(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; - const is_signed_int = lhs_scalar_ty.isSignedInt(); + const is_signed_int = lhs_scalar_ty.isSignedInt(mod); if (is_exact) { if (is_signed_int) { @@ -7881,14 +7930,14 @@ pub const FuncGen = struct { } fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const target = self.dg.module.getTarget(); + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dest_ty = self.air.typeOfIndex(inst); - const dest_info = dest_ty.intInfo(target); + const dest_info = dest_ty.intInfo(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); - const operand_info = operand_ty.intInfo(target); + const operand_info = operand_ty.intInfo(mod); if (operand_info.bits < dest_info.bits) { switch (operand_info.signedness) { @@ -7910,11 +7959,12 @@ pub const FuncGen = struct { } fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); const dest_ty = self.air.typeOfIndex(inst); - const target = self.dg.module.getTarget(); + const target = mod.getTarget(); const dest_bits = dest_ty.floatBits(target); const src_bits = operand_ty.floatBits(target); @@ -7939,11 +7989,12 @@ pub const FuncGen = struct { } fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); const dest_ty = self.air.typeOfIndex(inst); - const target = self.dg.module.getTarget(); + const target = mod.getTarget(); const dest_bits = dest_ty.floatBits(target); const src_bits = operand_ty.floatBits(target); @@ -7985,10 +8036,10 @@ pub const FuncGen = struct { } fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value { - const operand_is_ref = isByRef(operand_ty); - const result_is_ref = isByRef(inst_ty); + const mod = self.dg.module; + const operand_is_ref = isByRef(operand_ty, mod); + const result_is_ref = isByRef(inst_ty, mod); const llvm_dest_ty = try self.dg.lowerType(inst_ty); - const target = self.dg.module.getTarget(); if (operand_is_ref and result_is_ref) { // They are both pointers, so just return the same opaque pointer :) @@ -8001,20 +8052,20 @@ pub const FuncGen = struct { return self.builder.buildZExtOrBitCast(operand, 
                 llvm_dest_ty, "");
         }
 
-        if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) {
+        if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) {
             return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
         }
 
-        if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
+        if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
             const elem_ty = operand_ty.childType();
             if (!result_is_ref) {
                 return self.dg.todo("implement bitcast vector to non-ref array", .{});
             }
             const array_ptr = self.buildAlloca(llvm_dest_ty, null);
-            const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+            const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
             if (bitcast_ok) {
                 const llvm_store = self.builder.buildStore(operand, array_ptr);
-                llvm_store.setAlignment(inst_ty.abiAlignment(target));
+                llvm_store.setAlignment(inst_ty.abiAlignment(mod));
             } else {
                 // If the ABI size of the element type is not evenly divisible by its size in bits,
                 // a simple bitcast will not work, and we fall back to extractelement.
@@ -8033,19 +8084,19 @@
                 }
             }
             return array_ptr;
-        } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
+        } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
             const elem_ty = operand_ty.childType();
             const llvm_vector_ty = try self.dg.lowerType(inst_ty);
             if (!operand_is_ref) {
                 return self.dg.todo("implement bitcast non-ref array to vector", .{});
             }
 
-            const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+            const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
             if (bitcast_ok) {
                 const vector = self.builder.buildLoad(llvm_vector_ty, operand, "");
                 // The array is aligned to the element's alignment, while the vector might have a completely
                 // different alignment. This means we need to enforce the alignment of this load.
-                vector.setAlignment(elem_ty.abiAlignment(target));
+                vector.setAlignment(elem_ty.abiAlignment(mod));
                 return vector;
             } else {
                 // If the ABI size of the element type is not evenly divisible by its size in bits,
@@ -8073,12 +8124,12 @@
 
         if (operand_is_ref) {
             const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, "");
-            load_inst.setAlignment(operand_ty.abiAlignment(target));
+            load_inst.setAlignment(operand_ty.abiAlignment(mod));
             return load_inst;
         }
 
         if (result_is_ref) {
-            const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target));
+            const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod));
             const result_ptr = self.buildAlloca(llvm_dest_ty, alignment);
             const store_inst = self.builder.buildStore(operand, result_ptr);
             store_inst.setAlignment(alignment);
@@ -8089,7 +8140,7 @@
         // Both our operand and our result are values, not pointers,
         // but LLVM won't let us bitcast struct values.
         // Therefore, we store operand to alloca, then load for result.
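// (Illustrative aside, not part of the original diff.) A worked instance of
// the `bitcast_ok` test above: a u32 element has bitSize 32 and abiSize 4, so
// 32 == 4 * 8 holds and one plain store/load reinterprets the whole aggregate.
// A u24 element has bitSize 24 but abiSize 4, so 24 != 32: every element
// carries padding bits, and the value must be moved element by element with
// extractelement/insertelement instead. The alloca round trip below exists
// because LLVM refuses to bitcast aggregate values directly.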
- const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); + const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); const store_inst = self.builder.buildStore(operand, result_ptr); store_inst.setAlignment(alignment); @@ -8118,12 +8169,13 @@ pub const FuncGen = struct { } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; + const mod = self.dg.module; const func = self.dg.decl.getFunction().?; - const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; + const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( self.di_scope.?, - func.getParamName(self.dg.module, src_index).ptr, // TODO test 0 bit args + func.getParamName(mod, src_index).ptr, // TODO test 0 bit args self.di_file.?, lbrace_line, try self.dg.object.lowerDebugType(inst_ty, .full), @@ -8134,10 +8186,10 @@ pub const FuncGen = struct { const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null); const insert_block = self.builder.getInsertBlock(); - if (isByRef(inst_ty)) { + if (isByRef(inst_ty, mod)) { _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block); } else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) { - const alignment = inst_ty.abiAlignment(self.dg.module.getTarget()); + const alignment = inst_ty.abiAlignment(mod); const alloca = self.buildAlloca(arg_val.typeOf(), alignment); const store_inst = self.builder.buildStore(arg_val, alloca); store_inst.setAlignment(alignment); @@ -8153,22 +8205,22 @@ pub const FuncGen = struct { fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ptr_ty = self.air.typeOfIndex(inst); const pointee_type = ptr_ty.childType(); - if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); + const mod = self.dg.module; + if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const pointee_llvm_ty = try self.dg.lowerType(pointee_type); - const target = self.dg.module.getTarget(); - const alignment = ptr_ty.ptrAlignment(target); + const alignment = ptr_ty.ptrAlignment(mod); return self.buildAlloca(pointee_llvm_ty, alignment); } fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ptr_ty = self.air.typeOfIndex(inst); const ret_ty = ptr_ty.childType(); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); + const mod = self.dg.module; + if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; const ret_llvm_ty = try self.dg.lowerType(ret_ty); - const target = self.dg.module.getTarget(); - return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(target)); + return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod)); } /// Use this instead of builder.buildAlloca, because this function makes sure to @@ -8182,8 +8234,9 @@ pub const FuncGen = struct { const dest_ptr = try self.resolveInst(bin_op.lhs); const ptr_ty = self.air.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); + const mod = self.dg.module; - const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false; + const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { // Even 
if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -8193,13 +8246,12 @@ pub const FuncGen = struct { u8_llvm_ty.constInt(0xaa, .False) else u8_llvm_ty.getUndef(); - const target = self.dg.module.getTarget(); - const operand_size = operand_ty.abiSize(target); + const operand_size = operand_ty.abiSize(mod); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); - const dest_ptr_align = ptr_ty.ptrAlignment(target); + const dest_ptr_align = ptr_ty.ptrAlignment(mod); _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr()); - if (safety and self.dg.module.comp.bin_file.options.valgrind) { + if (safety and mod.comp.bin_file.options.valgrind) { self.valgrindMarkUndef(dest_ptr, len); } return null; @@ -8230,6 +8282,7 @@ pub const FuncGen = struct { } fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + const mod = fg.dg.module; const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[inst].ty_op; const ptr_ty = fg.air.typeOf(ty_op.operand); @@ -8237,7 +8290,7 @@ pub const FuncGen = struct { const ptr = try fg.resolveInst(ty_op.operand); elide: { - if (!isByRef(ptr_info.pointee_type)) break :elide; + if (!isByRef(ptr_info.pointee_type, mod)) break :elide; if (!canElideLoad(fg, body_tail)) break :elide; return ptr; } @@ -8261,8 +8314,9 @@ pub const FuncGen = struct { fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { _ = inst; + const mod = self.dg.module; const llvm_usize = try self.dg.lowerType(Type.usize); - const target = self.dg.module.getTarget(); + const target = mod.getTarget(); if (!target_util.supportsReturnAddress(target)) { // https://github.com/ziglang/zig/issues/11946 return llvm_usize.constNull(); @@ -8301,6 +8355,7 @@ pub const FuncGen = struct { } fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr = try self.resolveInst(extra.ptr); @@ -8310,7 +8365,7 @@ pub const FuncGen = struct { const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating - if (operand_ty.isSignedInt()) { + if (operand_ty.isSignedInt(mod)) { expected_value = self.builder.buildSExt(expected_value, abi_ty, ""); new_value = self.builder.buildSExt(new_value, abi_ty, ""); } else { @@ -8336,7 +8391,7 @@ pub const FuncGen = struct { } const success_bit = self.builder.buildExtractValue(result, 1, ""); - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, ""); } @@ -8347,13 +8402,14 @@ pub const FuncGen = struct { } fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try self.resolveInst(pl_op.operand); const ptr_ty = self.air.typeOf(pl_op.operand); const operand_ty = ptr_ty.elemType(); const operand = try self.resolveInst(extra.operand); - const is_signed_int = operand_ty.isSignedInt(); + const is_signed_int = operand_ty.isSignedInt(mod); const is_float = operand_ty.isRuntimeFloat(); 
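// (Illustrative aside, not part of the original diff.) Signedness is needed
// twice in the atomic lowerings above: when the operand must be widened to a
// larger atomic ABI type, the widening uses buildSExt for signed integers and
// buildZExt otherwise, and the RMW operation itself is selected with the
// signedness in hand. Concretely, an i8 operand of -1 widened to 32 bits must
// become 0xffffffff by sign-extension, not 0x000000ff, or a signed min/max
// would compare against the wrong value.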
const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); const ordering = toLlvmAtomicOrdering(extra.ordering()); @@ -8402,17 +8458,17 @@ pub const FuncGen = struct { } fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.air.typeOf(atomic_load.ptr); const ptr_info = ptr_ty.ptrInfo().data; const elem_ty = ptr_info.pointee_type; - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; const ordering = toLlvmAtomicOrdering(atomic_load.order); const opt_abi_llvm_ty = self.dg.getAtomicAbiType(elem_ty, false); - const target = self.dg.module.getTarget(); - const ptr_alignment = ptr_info.alignment(target); + const ptr_alignment = ptr_info.alignment(mod); const ptr_volatile = llvm.Bool.fromBool(ptr_info.@"volatile"); const elem_llvm_ty = try self.dg.lowerType(elem_ty); @@ -8436,17 +8492,18 @@ pub const FuncGen = struct { inst: Air.Inst.Index, ordering: llvm.AtomicOrdering, ) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); - if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null; + if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening - if (operand_ty.isSignedInt()) { + if (operand_ty.isSignedInt(mod)) { element = self.builder.buildSExt(element, abi_ty, ""); } else { element = self.builder.buildZExt(element, abi_ty, ""); @@ -8457,18 +8514,19 @@ pub const FuncGen = struct { } fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const ptr_ty = self.air.typeOf(bin_op.lhs); const elem_ty = self.air.typeOf(bin_op.rhs); const module = self.dg.module; const target = module.getTarget(); - const dest_ptr_align = ptr_ty.ptrAlignment(target); + const dest_ptr_align = ptr_ty.ptrAlignment(mod); const u8_llvm_ty = self.context.intType(8); const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(); - if (self.air.value(bin_op.rhs)) |elem_val| { + if (self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -8503,7 +8561,7 @@ pub const FuncGen = struct { } const value = try self.resolveInst(bin_op.rhs); - const elem_abi_size = elem_ty.abiSize(target); + const elem_abi_size = elem_ty.abiSize(mod); if (elem_abi_size == 1) { // In this case we can take advantage of LLVM's intrinsic. 
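One subtlety the memset hunks above preserve is worth a standalone illustration: a store of an undefined value still becomes a memset, so LLVM learns the bytes are meaningless, and only the fill byte depends on safety (Valgrind-enabled builds additionally mark the range undefined). A minimal sketch of that decision; undefFillByte is an invented name, not a compiler function, and LLVM's undef is modeled here as null:

const std = @import("std");

// With safety on, undefined memory is filled with 0xaa so stray reads of it
// are recognizable; with safety off the memset still happens, but with an
// undef fill byte that LLVM is free to treat however it likes.
fn undefFillByte(safety: bool) ?u8 {
    return if (safety) 0xaa else null;
}

test "safety selects the 0xaa fill pattern" {
    try std.testing.expectEqual(@as(?u8, 0xaa), undefFillByte(true));
    try std.testing.expectEqual(@as(?u8, null), undefFillByte(false));
}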
@@ -8551,9 +8609,9 @@ pub const FuncGen = struct { _ = self.builder.buildCondBr(end, body_block, end_block); self.builder.positionBuilderAtEnd(body_block); - const elem_abi_alignment = elem_ty.abiAlignment(target); + const elem_abi_alignment = elem_ty.abiAlignment(mod); const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align); - if (isByRef(elem_ty)) { + if (isByRef(elem_ty, mod)) { _ = self.builder.buildMemCpy( it_ptr, it_ptr_alignment, @@ -8589,13 +8647,13 @@ pub const FuncGen = struct { const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty); const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); + const mod = self.dg.module; const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr(); - const target = self.dg.module.getTarget(); _ = self.builder.buildMemCpy( dest_ptr, - dest_ptr_ty.ptrAlignment(target), + dest_ptr_ty.ptrAlignment(mod), src_ptr, - src_ptr_ty.ptrAlignment(target), + src_ptr_ty.ptrAlignment(mod), len, is_volatile, ); @@ -8603,10 +8661,10 @@ pub const FuncGen = struct { } fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const un_ty = self.air.typeOf(bin_op.lhs).childType(); - const target = self.dg.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_ptr = try self.resolveInst(bin_op.lhs); const new_tag = try self.resolveInst(bin_op.rhs); @@ -8624,13 +8682,13 @@ pub const FuncGen = struct { } fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const un_ty = self.air.typeOf(ty_op.operand); - const target = self.dg.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_handle = try self.resolveInst(ty_op.operand); - if (isByRef(un_ty)) { + if (isByRef(un_ty, mod)) { const llvm_un_ty = try self.dg.lowerType(un_ty); if (layout.payload_size == 0) { return self.builder.buildLoad(llvm_un_ty, union_handle, ""); @@ -8666,6 +8724,7 @@ pub const FuncGen = struct { } fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); @@ -8679,9 +8738,8 @@ pub const FuncGen = struct { const result_ty = self.air.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); - const target = self.dg.module.getTarget(); - const bits = operand_ty.intInfo(target).bits; - const result_bits = result_ty.intInfo(target).bits; + const bits = operand_ty.intInfo(mod).bits; + const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); } else if (bits < result_bits) { @@ -8692,6 +8750,7 @@ pub const FuncGen = struct { } fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); @@ -8704,9 +8763,8 @@ pub const FuncGen = 
struct { const result_ty = self.air.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); - const target = self.dg.module.getTarget(); - const bits = operand_ty.intInfo(target).bits; - const result_bits = result_ty.intInfo(target).bits; + const bits = operand_ty.intInfo(mod).bits; + const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); } else if (bits < result_bits) { @@ -8717,10 +8775,10 @@ pub const FuncGen = struct { } fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { - const target = self.dg.module.getTarget(); + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.air.typeOf(ty_op.operand); - var bits = operand_ty.intInfo(target).bits; + var bits = operand_ty.intInfo(mod).bits; assert(bits % 8 == 0); var operand = try self.resolveInst(ty_op.operand); @@ -8730,7 +8788,7 @@ pub const FuncGen = struct { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte // The truncated result at the end will be the correct bswap const scalar_llvm_ty = self.context.intType(bits + 8); - if (operand_ty.zigTypeTag() == .Vector) { + if (operand_ty.zigTypeTag(mod) == .Vector) { const vec_len = operand_ty.vectorLen(); operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len); @@ -8759,7 +8817,7 @@ pub const FuncGen = struct { const result_ty = self.air.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); - const result_bits = result_ty.intInfo(target).bits; + const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); } else if (bits < result_bits) { @@ -8770,6 +8828,7 @@ pub const FuncGen = struct { } fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const error_set_ty = self.air.getRefType(ty_op.ty); @@ -8781,7 +8840,7 @@ pub const FuncGen = struct { const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); for (names) |name| { - const err_int = self.dg.module.global_error_set.get(name).?; + const err_int = mod.global_error_set.get(name).?; const this_tag_int_value = int: { var tag_val_payload: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -8841,8 +8900,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn}); - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer); + const int_tag_ty = enum_ty.intTagType(); const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)}; const llvm_ret_ty = try self.dg.lowerType(Type.bool); @@ -8923,11 +8981,9 @@ pub const FuncGen = struct { const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); const llvm_ret_ty = try self.dg.lowerType(slice_ty); const usize_llvm_ty = try self.dg.lowerType(Type.usize); - const target = self.dg.module.getTarget(); - const slice_alignment = slice_ty.abiAlignment(target); + const slice_alignment = slice_ty.abiAlignment(mod); - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer); + const int_tag_ty = enum_ty.intTagType(); const param_types = 
[_]*llvm.Type{try self.dg.lowerType(int_tag_ty)}; const fn_type = llvm.functionType(llvm_ret_ty, ¶m_types, param_types.len, .False); @@ -9057,6 +9113,7 @@ pub const FuncGen = struct { } fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolveInst(extra.a); @@ -9077,11 +9134,11 @@ pub const FuncGen = struct { for (values, 0..) |*val, i| { var buf: Value.ElemValueBuffer = undefined; - const elem = mask.elemValueBuffer(self.dg.module, i, &buf); + const elem = mask.elemValueBuffer(mod, i, &buf); if (elem.isUndef()) { val.* = llvm_i32.getUndef(); } else { - const int = elem.toSignedInt(self.dg.module.getTarget()); + const int = elem.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); val.* = llvm_i32.constInt(unsigned, .False); } @@ -9157,7 +9214,8 @@ pub const FuncGen = struct { fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); - const target = self.dg.module.getTarget(); + const mod = self.dg.module; + const target = mod.getTarget(); const reduce = self.air.instructions.items(.data)[inst].reduce; const operand = try self.resolveInst(reduce.operand); @@ -9168,21 +9226,21 @@ pub const FuncGen = struct { .And => return self.builder.buildAndReduce(operand), .Or => return self.builder.buildOrReduce(operand), .Xor => return self.builder.buildXorReduce(operand), - .Min => switch (scalar_ty.zigTypeTag()) { - .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt()), + .Min => switch (scalar_ty.zigTypeTag(mod)) { + .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)), .Float => if (intrinsicsAllowed(scalar_ty, target)) { return self.builder.buildFPMinReduce(operand); }, else => unreachable, }, - .Max => switch (scalar_ty.zigTypeTag()) { - .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt()), + .Max => switch (scalar_ty.zigTypeTag(mod)) { + .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)), .Float => if (intrinsicsAllowed(scalar_ty, target)) { return self.builder.buildFPMaxReduce(operand); }, else => unreachable, }, - .Add => switch (scalar_ty.zigTypeTag()) { + .Add => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildAddReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -9191,7 +9249,7 @@ pub const FuncGen = struct { }, else => unreachable, }, - .Mul => switch (scalar_ty.zigTypeTag()) { + .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildMulReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -9247,9 +9305,9 @@ pub const FuncGen = struct { const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = try self.dg.lowerType(result_ty); - const target = self.dg.module.getTarget(); + const mod = self.dg.module; - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Vector => { const llvm_u32 = self.context.intType(32); @@ -9265,7 +9323,7 @@ pub const FuncGen = struct { if (result_ty.containerLayout() == .Packed) { const struct_obj = 
result_ty.castTag(.@"struct").?.data; assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(target); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); @@ -9273,12 +9331,12 @@ pub const FuncGen = struct { var running_bits: u16 = 0; for (elements, 0..) |elem, i| { const field = fields[i]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try self.resolveInst(elem); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = self.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else self.builder.buildBitCast(non_int_val, small_int_ty, ""); @@ -9296,24 +9354,24 @@ pub const FuncGen = struct { var ptr_ty_buf: Type.Payload.Pointer = undefined; - if (isByRef(result_ty)) { + if (isByRef(result_ty, mod)) { const llvm_u32 = self.context.intType(32); // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target)); + const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(i) != null) continue; + if (result_ty.structFieldValueComptime(mod, i) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?; + const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; indices[1] = llvm_u32.constInt(llvm_i, .False); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); var field_ptr_payload: Type.Payload.Pointer = .{ .data = .{ .pointee_type = self.air.typeOf(elem), - .@"align" = result_ty.structFieldAlign(i, target), + .@"align" = result_ty.structFieldAlign(i, mod), .@"addrspace" = .generic, }, }; @@ -9325,20 +9383,20 @@ pub const FuncGen = struct { } else { var result = llvm_result_ty.getUndef(); for (elements, 0..) 
|elem, i| { - if (result_ty.structFieldValueComptime(i) != null) continue; + if (result_ty.structFieldValueComptime(mod, i) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?; + const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, ""); } return result; } }, .Array => { - assert(isByRef(result_ty)); + assert(isByRef(result_ty, mod)); const llvm_usize = try self.dg.lowerType(Type.usize); - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target)); + const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); const array_info = result_ty.arrayInfo(); var elem_ptr_payload: Type.Payload.Pointer = .{ @@ -9379,22 +9437,22 @@ pub const FuncGen = struct { } fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.air.typeOfIndex(inst); const union_llvm_ty = try self.dg.lowerType(union_ty); - const target = self.dg.module.getTarget(); - const layout = union_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(mod); const union_obj = union_ty.cast(Type.Payload.Union).?.data; if (union_obj.layout == .Packed) { - const big_bits = union_ty.bitSize(target); + const big_bits = union_ty.bitSize(mod); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = self.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else self.builder.buildBitCast(non_int_val, small_int_ty, ""); @@ -9412,16 +9470,16 @@ pub const FuncGen = struct { const tag_val = Value.initPayload(&tag_val_payload.base); var int_payload: Value.Payload.U64 = undefined; const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload); - break :blk tag_int_val.toUnsignedInt(target); + break :blk tag_int_val.toUnsignedInt(mod); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { return null; } - assert(!isByRef(union_ty)); + assert(!isByRef(union_ty, mod)); return union_llvm_ty.constInt(tag_int, .False); } - assert(isByRef(union_ty)); + assert(isByRef(union_ty, mod)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. 
// We must construct the correct unnamed struct type here, in order to then set @@ -9431,12 +9489,12 @@ pub const FuncGen = struct { assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; const field_llvm_ty = try self.dg.lowerType(field.ty); - const field_size = field.ty.abiSize(target); - const field_align = field.normalAlignment(target); + const field_size = field.ty.abiSize(mod); + const field_align = field.normalAlignment(mod); const llvm_union_ty = t: { const payload = p: { - if (!field.ty.hasRuntimeBitsIgnoreComptime()) { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) { const padding_len = @intCast(c_uint, layout.payload_size); break :p self.context.intType(8).arrayType(padding_len); } @@ -9511,7 +9569,7 @@ pub const FuncGen = struct { const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty); const llvm_tag = tag_llvm_ty.constInt(tag_int, .False); const store_inst = self.builder.buildStore(llvm_tag, field_ptr); - store_inst.setAlignment(union_obj.tag_ty.abiAlignment(target)); + store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod)); } return result_ptr; @@ -9535,7 +9593,8 @@ pub const FuncGen = struct { // by the target. // To work around this, don't emit llvm.prefetch in this case. // See https://bugs.llvm.org/show_bug.cgi?id=21037 - const target = self.dg.module.getTarget(); + const mod = self.dg.module; + const target = mod.getTarget(); switch (prefetch.cache) { .instruction => switch (target.cpu.arch) { .x86_64, @@ -9658,8 +9717,9 @@ pub const FuncGen = struct { return table; } + const mod = self.dg.module; const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget()); + const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table"); @@ -9703,14 +9763,14 @@ pub const FuncGen = struct { ) !*llvm.Value { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); + const mod = fg.dg.module; - if (isByRef(opt_ty)) { + if (isByRef(opt_ty, mod)) { // We have a pointer and we need to return a pointer to the first field. 
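// A sketch of the rule this branch relies on, mirroring the .Optional arm of
// isByRef further down in this file (now taking `mod` instead of std.Target):
//
//   .Optional => {
//       var buf: Type.Payload.ElemType = undefined;
//       const payload_ty = ty.optionalChild(&buf);
//       if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return false;
//       if (ty.optionalReprIsPayload(mod)) return false;
//       return true;
//   },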
const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, ""); - const target = fg.dg.module.getTarget(); - const payload_alignment = payload_ty.abiAlignment(target); - if (isByRef(payload_ty)) { + const payload_alignment = payload_ty.abiAlignment(mod); + if (isByRef(payload_ty, mod)) { if (can_elide_load) return payload_ptr; @@ -9722,7 +9782,7 @@ pub const FuncGen = struct { return load_inst; } - assert(!isByRef(payload_ty)); + assert(!isByRef(payload_ty, mod)); return fg.builder.buildExtractValue(opt_handle, 0, ""); } @@ -9734,10 +9794,10 @@ pub const FuncGen = struct { ) !?*llvm.Value { const optional_llvm_ty = try self.dg.lowerType(optional_ty); const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), ""); + const mod = self.dg.module; - if (isByRef(optional_ty)) { - const target = self.dg.module.getTarget(); - const payload_alignment = optional_ty.abiAlignment(target); + if (isByRef(optional_ty, mod)) { + const payload_alignment = optional_ty.abiAlignment(mod); const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment); { @@ -9765,9 +9825,9 @@ pub const FuncGen = struct { struct_ptr_ty: Type, field_index: u32, ) !?*llvm.Value { - const target = self.dg.object.target; const struct_ty = struct_ptr_ty.childType(); - switch (struct_ty.zigTypeTag()) { + const mod = self.dg.module; + switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout()) { .Packed => { const result_ty = self.air.typeOfIndex(inst); @@ -9783,7 +9843,7 @@ pub const FuncGen = struct { // We have a pointer to a packed struct field that happens to be byte-aligned. // Offset our operand pointer by the correct number of bytes. - const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target); + const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; const byte_llvm_ty = self.context.intType(8); const llvm_usize = try self.dg.lowerType(Type.usize); @@ -9795,7 +9855,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty); var ty_buf: Type.Payload.Pointer = undefined; - if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| { + if (llvmFieldIndex(struct_ty, field_index, mod, &ty_buf)) |llvm_field_index| { return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, ""); } else { // If we found no index then this means this is a zero sized field at the @@ -9803,14 +9863,14 @@ pub const FuncGen = struct { // the index to the element at index `1` to get a pointer to the end of // the struct. 
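// Hypothetical illustration: for `const S = struct { a: u32, b: void }`, asking
// for a pointer to `b` finds no corresponding LLVM field, so the GEP below
// indexes one element past the struct value (index 1), yielding the
// end-of-struct address; for a fully zero-bit struct the index stays 0.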
const llvm_u32 = self.context.intType(32); - const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime()), .False); + const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); const indices: [1]*llvm.Value = .{llvm_index}; return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, ""); } }, }, .Union => { - const layout = struct_ty.unionGetLayout(target); + const layout = struct_ty.unionGetLayout(mod); if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr; const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const union_llvm_ty = try self.dg.lowerType(struct_ty); @@ -9835,12 +9895,12 @@ pub const FuncGen = struct { ptr_alignment: u32, is_volatile: bool, ) !*llvm.Value { + const mod = fg.dg.module; const pointee_llvm_ty = try fg.dg.lowerType(pointee_type); - const target = fg.dg.module.getTarget(); - const result_align = @max(ptr_alignment, pointee_type.abiAlignment(target)); + const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align); - const llvm_usize = fg.context.intType(Type.usize.intInfo(target).bits); - const size_bytes = pointee_type.abiSize(target); + const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits); + const size_bytes = pointee_type.abiSize(mod); _ = fg.builder.buildMemCpy( result_ptr, result_align, @@ -9856,11 +9916,11 @@ pub const FuncGen = struct { /// alloca and copies the value into it, then returns the alloca instruction. /// For isByRef=false types, it creates a load instruction and returns it. fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value { + const mod = self.dg.module; const info = ptr_ty.ptrInfo().data; - if (!info.pointee_type.hasRuntimeBitsIgnoreComptime()) return null; + if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; - const target = self.dg.module.getTarget(); - const ptr_alignment = info.alignment(target); + const ptr_alignment = info.alignment(mod); const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr()); assert(info.vector_index != .runtime); @@ -9877,7 +9937,7 @@ pub const FuncGen = struct { } if (info.host_size == 0) { - if (isByRef(info.pointee_type)) { + if (isByRef(info.pointee_type, mod)) { return self.loadByRef(ptr, info.pointee_type, ptr_alignment, info.@"volatile"); } const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); @@ -9892,13 +9952,13 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target)); + const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); - if (isByRef(info.pointee_type)) { - const result_align = info.pointee_type.abiAlignment(target); + if (isByRef(info.pointee_type, mod)) { + const result_align = info.pointee_type.abiAlignment(mod); const result_ptr = self.buildAlloca(elem_llvm_ty, result_align); const same_size_int = self.context.intType(elem_bits); @@ -9908,13 +9968,13 @@ pub const FuncGen = struct { return result_ptr; } - if (info.pointee_type.zigTypeTag() == .Float or info.pointee_type.zigTypeTag() == .Vector) { + if (info.pointee_type.zigTypeTag(mod) == 
.Float or info.pointee_type.zigTypeTag(mod) == .Vector) { const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } - if (info.pointee_type.isPtrAtRuntime()) { + if (info.pointee_type.isPtrAtRuntime(mod)) { const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); @@ -9932,11 +9992,11 @@ pub const FuncGen = struct { ) !void { const info = ptr_ty.ptrInfo().data; const elem_ty = info.pointee_type; - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + const mod = self.dg.module; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { return; } - const target = self.dg.module.getTarget(); - const ptr_alignment = ptr_ty.ptrAlignment(target); + const ptr_alignment = ptr_ty.ptrAlignment(mod); const ptr_volatile = llvm.Bool.fromBool(info.@"volatile"); assert(info.vector_index != .runtime); @@ -9964,13 +10024,13 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target)); + const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); const containing_int_ty = containing_int.typeOf(); const shift_amt = containing_int_ty.constInt(info.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store const value_bits_type = self.context.intType(elem_bits); - const value_bits = if (elem_ty.isPtrAtRuntime()) + const value_bits = if (elem_ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(elem, value_bits_type, "") else self.builder.buildBitCast(elem, value_bits_type, ""); @@ -9991,7 +10051,7 @@ pub const FuncGen = struct { store_inst.setVolatile(ptr_volatile); return; } - if (!isByRef(elem_ty)) { + if (!isByRef(elem_ty, mod)) { const store_inst = self.builder.buildStore(elem, ptr); store_inst.setOrdering(ordering); store_inst.setAlignment(ptr_alignment); @@ -9999,13 +10059,13 @@ pub const FuncGen = struct { return; } assert(ordering == .NotAtomic); - const size_bytes = elem_ty.abiSize(target); + const size_bytes = elem_ty.abiSize(mod); _ = self.builder.buildMemCpy( ptr, ptr_alignment, elem, - elem_ty.abiAlignment(target), - self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False), + elem_ty.abiAlignment(mod), + self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False), info.@"volatile", ); } @@ -10030,11 +10090,12 @@ pub const FuncGen = struct { a4: *llvm.Value, a5: *llvm.Value, ) *llvm.Value { - const target = fg.dg.module.getTarget(); + const mod = fg.dg.module; + const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); - const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target)); + const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod)); const array_llvm_ty = usize_llvm_ty.arrayType(6); const array_ptr = fg.valgrind_client_request_array orelse a: { @@ -10451,7 +10512,7 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ fn llvmFieldIndex( ty: Type, field_index: usize, - target: std.Target, + mod: *const Module, ptr_pl_buf: *Type.Payload.Pointer, ) ?c_uint { // Detects where we 
inserted extra padding fields so that we can skip @@ -10464,9 +10525,9 @@ fn llvmFieldIndex( const tuple = ty.tupleFields(); var llvm_field_index: c_uint = 0; for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -10488,7 +10549,7 @@ fn llvmFieldIndex( } llvm_field_index += 1; - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } return null; } @@ -10496,10 +10557,10 @@ fn llvmFieldIndex( assert(layout != .Packed); var llvm_field_index: c_uint = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(); + var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, layout); + const field_align = field.alignment(mod, layout); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -10521,43 +10582,44 @@ fn llvmFieldIndex( } llvm_field_index += 1; - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } else { // We did not find an llvm field that corresponds to this zig field. return null; } } -fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool { - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) return false; +fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool { + if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false; + const target = mod.getTarget(); switch (fn_info.cc) { - .Unspecified, .Inline => return isByRef(fn_info.return_type), + .Unspecified, .Inline => return isByRef(fn_info.return_type, mod), .C => switch (target.cpu.arch) { .mips, .mipsel => return false, .x86_64 => switch (target.os.tag) { - .windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory, - else => return firstParamSRetSystemV(fn_info.return_type, target), + .windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, + else => return firstParamSRetSystemV(fn_info.return_type, mod), }, - .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect, - .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) { + .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect, + .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { .memory, .i64_array => return true, .i32_array => |size| return size != 1, .byval => return false, }, - .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory, + .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory, else => return false, // TODO investigate C ABI for other architectures }, - .SysV => return firstParamSRetSystemV(fn_info.return_type, target), - .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, target) == 
.memory, - .Stdcall => return !isScalar(fn_info.return_type), + .SysV => return firstParamSRetSystemV(fn_info.return_type, mod), + .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, + .Stdcall => return !isScalar(mod, fn_info.return_type), else => return false, } } -fn firstParamSRetSystemV(ty: Type, target: std.Target) bool { - const class = x86_64_abi.classifySystemV(ty, target, .ret); +fn firstParamSRetSystemV(ty: Type, mod: *const Module) bool { + const class = x86_64_abi.classifySystemV(ty, mod, .ret); if (class[0] == .memory) return true; if (class[0] == .x87 and class[2] != .none) return true; return false; @@ -10567,20 +10629,21 @@ fn firstParamSRetSystemV(ty: Type, target: std.Target) bool { /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) { + const mod = dg.module; + if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. - if (fn_info.return_type.isError()) { + if (fn_info.return_type.isError(mod)) { return dg.lowerType(Type.anyerror); } else { return dg.context.voidType(); } } - const target = dg.module.getTarget(); + const target = mod.getTarget(); switch (fn_info.cc) { .Unspecified, .Inline => { - if (isByRef(fn_info.return_type)) { + if (isByRef(fn_info.return_type, mod)) { return dg.context.voidType(); } else { return dg.lowerType(fn_info.return_type); @@ -10594,33 +10657,33 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { else => return lowerSystemVFnRetTy(dg, fn_info), }, .wasm32 => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } - const classes = wasm_c_abi.classifyType(fn_info.return_type, target); + const classes = wasm_c_abi.classifyType(fn_info.return_type, mod); if (classes[0] == .indirect or classes[0] == .none) { return dg.context.voidType(); } assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, target); - const abi_size = scalar_type.abiSize(target); + const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod); + const abi_size = scalar_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); }, .aarch64, .aarch64_be => { - switch (aarch64_c_abi.classifyType(fn_info.return_type, target)) { + switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) { .memory => return dg.context.voidType(), .float_array => return dg.lowerType(fn_info.return_type), .byval => return dg.lowerType(fn_info.return_type), .integer => { - const bit_size = fn_info.return_type.bitSize(target); + const bit_size = fn_info.return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => return dg.context.intType(64).arrayType(2), } }, .arm, .armeb => { - switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) { + switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { .memory, .i64_array => return dg.context.voidType(), .i32_array => |len| if (len == 1) { return dg.context.intType(32); @@ -10631,10 +10694,10 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: 
Type.Payload.Function.Data) !*llvm.Type { } }, .riscv32, .riscv64 => { - switch (riscv_c_abi.classifyType(fn_info.return_type, target)) { + switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) { .memory => return dg.context.voidType(), .integer => { - const bit_size = fn_info.return_type.bitSize(target); + const bit_size = fn_info.return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => { @@ -10654,7 +10717,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { .Win64 => return lowerWin64FnRetTy(dg, fn_info), .SysV => return lowerSystemVFnRetTy(dg, fn_info), .Stdcall => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } else { return dg.context.voidType(); @@ -10665,13 +10728,13 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { } fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - const target = dg.module.getTarget(); - switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) { + const mod = dg.module; + switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) { .integer => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } else { - const abi_size = fn_info.return_type.abiSize(target); + const abi_size = fn_info.return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } }, @@ -10683,11 +10746,11 @@ fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.T } fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - if (isScalar(fn_info.return_type)) { + const mod = dg.module; + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } - const target = dg.module.getTarget(); - const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret); + const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret); if (classes[0] == .memory) { return dg.context.voidType(); } @@ -10728,7 +10791,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm } } if (classes[0] == .integer and classes[1] == .none) { - const abi_size = fn_info.return_type.abiSize(target); + const abi_size = fn_info.return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False); @@ -10739,7 +10802,6 @@ const ParamTypeIterator = struct { fn_info: Type.Payload.Function.Data, zig_index: u32, llvm_index: u32, - target: std.Target, llvm_types_len: u32, llvm_types_buffer: [8]*llvm.Type, byval_attr: bool, @@ -10779,7 +10841,10 @@ const ParamTypeIterator = struct { } fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + const mod = it.dg.module; + const target = mod.getTarget(); + + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { it.zig_index += 1; return .no_bits; } @@ -10788,10 +10853,10 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; var buf: Type.Payload.ElemType = undefined; - if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) { + if (ty.isSlice() or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice())) { it.llvm_index += 1; return .slice; - } else if (isByRef(ty)) { + } else if (isByRef(ty, mod)) { return .byref; } else { 
return .byval; @@ -10801,23 +10866,23 @@ const ParamTypeIterator = struct { @panic("TODO implement async function lowering in the LLVM backend"); }, .C => { - switch (it.target.cpu.arch) { + switch (target.cpu.arch) { .mips, .mipsel => { it.zig_index += 1; it.llvm_index += 1; return .byval; }, - .x86_64 => switch (it.target.os.tag) { + .x86_64 => switch (target.os.tag) { .windows => return it.nextWin64(ty), else => return it.nextSystemV(ty), }, .wasm32 => { it.zig_index += 1; it.llvm_index += 1; - if (isScalar(ty)) { + if (isScalar(mod, ty)) { return .byval; } - const classes = wasm_c_abi.classifyType(ty, it.target); + const classes = wasm_c_abi.classifyType(ty, mod); if (classes[0] == .indirect) { return .byref; } @@ -10826,7 +10891,7 @@ const ParamTypeIterator = struct { .aarch64, .aarch64_be => { it.zig_index += 1; it.llvm_index += 1; - switch (aarch64_c_abi.classifyType(ty, it.target)) { + switch (aarch64_c_abi.classifyType(ty, mod)) { .memory => return .byref_mut, .float_array => |len| return Lowering{ .float_array = len }, .byval => return .byval, @@ -10841,7 +10906,7 @@ const ParamTypeIterator = struct { .arm, .armeb => { it.zig_index += 1; it.llvm_index += 1; - switch (arm_c_abi.classifyType(ty, it.target, .arg)) { + switch (arm_c_abi.classifyType(ty, mod, .arg)) { .memory => { it.byval_attr = true; return .byref; @@ -10857,7 +10922,7 @@ const ParamTypeIterator = struct { if (ty.tag() == .f16) { return .as_u16; } - switch (riscv_c_abi.classifyType(ty, it.target)) { + switch (riscv_c_abi.classifyType(ty, mod)) { .memory => return .byref_mut, .byval => return .byval, .integer => return .abi_sized_int, @@ -10878,7 +10943,7 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; - if (isScalar(ty)) { + if (isScalar(mod, ty)) { return .byval; } else { it.byval_attr = true; @@ -10894,9 +10959,10 @@ const ParamTypeIterator = struct { } fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering { - switch (x86_64_abi.classifyWindows(ty, it.target)) { + const mod = it.dg.module; + switch (x86_64_abi.classifyWindows(ty, mod)) { .integer => { - if (isScalar(ty)) { + if (isScalar(mod, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -10926,14 +10992,15 @@ const ParamTypeIterator = struct { } fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering { - const classes = x86_64_abi.classifySystemV(ty, it.target, .arg); + const mod = it.dg.module; + const classes = x86_64_abi.classifySystemV(ty, mod, .arg); if (classes[0] == .memory) { it.zig_index += 1; it.llvm_index += 1; it.byval_attr = true; return .byref; } - if (isScalar(ty)) { + if (isScalar(mod, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -10992,7 +11059,6 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp .fn_info = fn_info, .zig_index = 0, .llvm_index = 0, - .target = dg.module.getTarget(), .llvm_types_buffer = undefined, .llvm_types_len = 0, .byval_attr = false, @@ -11001,16 +11067,17 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp fn ccAbiPromoteInt( cc: std.builtin.CallingConvention, - target: std.Target, + mod: *const Module, ty: Type, ) ?std.builtin.Signedness { + const target = mod.getTarget(); switch (cc) { .Unspecified, .Inline, .Async => return null, else => {}, } - const int_info = switch (ty.zigTypeTag()) { - .Bool => Type.u1.intInfo(target), - .Int, .Enum, .ErrorSet => ty.intInfo(target), + const int_info = switch (ty.zigTypeTag(mod)) { + .Bool => Type.u1.intInfo(mod), + .Int, .Enum, .ErrorSet => 
ty.intInfo(mod), else => return null, }; if (int_info.bits <= 16) return int_info.signedness; @@ -11039,12 +11106,12 @@ fn ccAbiPromoteInt( /// This is the one source of truth for whether a type is passed around as an LLVM pointer, /// or as an LLVM value. -fn isByRef(ty: Type) bool { +fn isByRef(ty: Type, mod: *const Module) bool { // For tuples and structs, if there are more than this many non-void // fields, then we make it byref, otherwise byval. const max_fields_byval = 0; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -11067,7 +11134,7 @@ fn isByRef(ty: Type) bool { .AnyFrame, => return false, - .Array, .Frame => return ty.hasRuntimeBits(), + .Array, .Frame => return ty.hasRuntimeBits(mod), .Struct => { // Packed structs are represented to LLVM as integers. if (ty.containerLayout() == .Packed) return false; @@ -11075,32 +11142,32 @@ fn isByRef(ty: Type) bool { const tuple = ty.tupleFields(); var count: usize = 0; for (tuple.values, 0..) |field_val, i| { - if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(tuple.types[i])) return true; + if (isByRef(tuple.types[i], mod)) return true; } return false; } var count: usize = 0; const fields = ty.structFields(); for (fields.values()) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(field.ty)) return true; + if (isByRef(field.ty, mod)) return true; } return false; }, .Union => switch (ty.containerLayout()) { .Packed => return false, - else => return ty.hasRuntimeBits(), + else => return ty.hasRuntimeBits(mod), }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } return true; @@ -11108,10 +11175,10 @@ fn isByRef(ty: Type) bool { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return false; } return true; @@ -11119,8 +11186,8 @@ fn isByRef(ty: Type) bool { } } -fn isScalar(ty: Type) bool { - return switch (ty.zigTypeTag()) { +fn isScalar(mod: *const Module, ty: Type) bool { + return switch (ty.zigTypeTag(mod)) { .Void, .Bool, .NoReturn, @@ -11304,12 +11371,12 @@ fn buildAllocaInner( return alloca; } -fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u1 { - return @boolToInt(Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)); +fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u1 { + return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod)); } -fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u1 { - return @boolToInt(Type.anyerror.abiAlignment(target) <= payload_ty.abiAlignment(target)); +fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u1 { + return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod)); } /// Returns true for asm constraint (e.g. 
"=*m", "=r") if it accepts a memory location diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 09ace669a9bf..b8c8466427df 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -231,9 +231,10 @@ pub const DeclGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { - if (self.air.value(inst)) |val| { + const mod = self.module; + if (self.air.value(inst, mod)) |val| { const ty = self.air.typeOf(inst); - if (ty.zigTypeTag() == .Fn) { + if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, .function => val.castTag(.function).?.data.owner_decl, @@ -340,8 +341,9 @@ pub const DeclGen = struct { } fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo { + const mod = self.module; const target = self.getTarget(); - return switch (ty.zigTypeTag()) { + return switch (ty.zigTypeTag(mod)) { .Bool => ArithmeticTypeInfo{ .bits = 1, // Doesn't matter for this class. .is_vector = false, @@ -355,7 +357,7 @@ pub const DeclGen = struct { .class = .float, }, .Int => blk: { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); // TODO: Maybe it's useful to also return this value. const maybe_backing_bits = self.backingIntBits(int_info.bits); break :blk ArithmeticTypeInfo{ @@ -533,21 +535,22 @@ pub const DeclGen = struct { } fn addInt(self: *@This(), ty: Type, val: Value) !void { - const target = self.dg.getTarget(); - const int_info = ty.intInfo(target); + const mod = self.dg.module; + const int_info = ty.intInfo(mod); const int_bits = switch (int_info.signedness) { - .signed => @bitCast(u64, val.toSignedInt(target)), - .unsigned => val.toUnsignedInt(target), + .signed => @bitCast(u64, val.toSignedInt(mod)), + .unsigned => val.toUnsignedInt(mod), }; // TODO: Swap endianess if the compiler is big endian. - const len = ty.abiSize(target); + const len = ty.abiSize(mod); try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]); } fn addFloat(self: *@This(), ty: Type, val: Value) !void { + const mod = self.dg.module; const target = self.dg.getTarget(); - const len = ty.abiSize(target); + const len = ty.abiSize(mod); // TODO: Swap endianess if the compiler is big endian. switch (ty.floatBits(target)) { @@ -607,15 +610,15 @@ pub const DeclGen = struct { } fn lower(self: *@This(), ty: Type, val: Value) !void { - const target = self.dg.getTarget(); const dg = self.dg; + const mod = dg.module; if (val.isUndef()) { - const size = ty.abiSize(target); + const size = ty.abiSize(mod); return try self.addUndef(size); } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => try self.addInt(ty, val), .Float => try self.addFloat(ty, val), .Bool => try self.addConstBool(val.toBool()), @@ -644,7 +647,7 @@ pub const DeclGen = struct { const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; try self.addBytes(bytes); if (ty.sentinel()) |sentinel| { - try self.addByte(@intCast(u8, sentinel.toUnsignedInt(target))); + try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); } }, .bytes => { @@ -690,13 +693,13 @@ pub const DeclGen = struct { const struct_begin = self.size; const field_vals = val.castTag(.aggregate).?.data; for (struct_ty.fields.values(), 0..) 
|field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; try self.lower(field.ty, field_vals[i]); // Add padding if required. // TODO: Add to type generation as well? const unpadded_field_end = self.size - struct_begin; - const padded_field_end = ty.structFieldOffset(i + 1, target); + const padded_field_end = ty.structFieldOffset(i + 1, mod); const padding = padded_field_end - unpadded_field_end; try self.addUndef(padding); } @@ -705,13 +708,13 @@ pub const DeclGen = struct { .Optional => { var opt_buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&opt_buf); - const has_payload = !val.isNull(); - const abi_size = ty.abiSize(target); + const has_payload = !val.isNull(mod); + const abi_size = ty.abiSize(mod); - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBits(mod)) { try self.addConstBool(has_payload); return; - } else if (ty.optionalReprIsPayload()) { + } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. if (val.castTag(.opt_payload)) |payload| { try self.lower(payload_ty, payload.data); @@ -729,7 +732,7 @@ pub const DeclGen = struct { // Subtract 1 for @sizeOf(bool). // TODO: Make this not hardcoded. - const payload_size = payload_ty.abiSize(target); + const payload_size = payload_ty.abiSize(mod); const padding = abi_size - payload_size - 1; if (val.castTag(.opt_payload)) |payload| { @@ -744,14 +747,13 @@ pub const DeclGen = struct { var int_val_buffer: Value.Payload.U64 = undefined; const int_val = val.enumToInt(ty, &int_val_buffer); - var int_ty_buffer: Type.Payload.Bits = undefined; - const int_ty = ty.intTagType(&int_ty_buffer); + const int_ty = ty.intTagType(); try self.lower(int_ty, int_val); }, .Union => { const tag_and_val = val.castTag(.@"union").?.data; - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); @@ -772,9 +774,9 @@ pub const DeclGen = struct { try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); } - const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: { + const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { try self.lower(active_field_ty, tag_and_val.val); - break :blk active_field_ty.abiSize(target); + break :blk active_field_ty.abiSize(mod); } else 0; const payload_padding_len = layout.payload_size - active_field_size; @@ -808,9 +810,9 @@ pub const DeclGen = struct { return try self.lower(Type.anyerror, error_val); } - const payload_size = payload_ty.abiSize(target); - const error_size = Type.anyerror.abiAlignment(target); - const ty_size = ty.abiSize(target); + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiAlignment(mod); + const ty_size = ty.abiSize(mod); const padding = ty_size - payload_size - error_size; const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); @@ -886,7 +888,7 @@ pub const DeclGen = struct { // .id_result = result_id, // .storage_class = storage_class, // }); - // } else if (ty.abiSize(target) == 0) { + // } else if (ty.abiSize(mod) == 0) { // // Special case: if the type has no size, then return an undefined pointer. 
// return try section.emit(self.spv.gpa, .OpUndef, .{ // .id_result_type = self.typeId(ptr_ty_ref), @@ -968,6 +970,7 @@ pub const DeclGen = struct { /// is then loaded using OpLoad. Such values are loaded into the UniformConstant storage class by default. /// This function should only be called during function code generation. fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef { + const mod = self.module; const target = self.getTarget(); const result_ty_ref = try self.resolveType(ty, repr); @@ -977,12 +980,12 @@ pub const DeclGen = struct { return self.spv.constUndef(result_ty_ref); } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => { - if (ty.isSignedInt()) { - return try self.spv.constInt(result_ty_ref, val.toSignedInt(target)); + if (ty.isSignedInt(mod)) { + return try self.spv.constInt(result_ty_ref, val.toSignedInt(mod)); } else { - return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(target)); + return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod)); } }, .Bool => switch (repr) { @@ -1037,7 +1040,7 @@ pub const DeclGen = struct { // The value cannot be generated directly, so generate it as an indirect constant, // and then perform an OpLoad. const result_id = self.spv.allocId(); - const alignment = ty.abiAlignment(target); + const alignment = ty.abiAlignment(mod); const spv_decl_index = try self.spv.allocDecl(.global); try self.lowerIndirectConstant( @@ -1114,8 +1117,8 @@ pub const DeclGen = struct { /// NOTE: When the active field is set to something other than the most aligned field, the /// resulting struct will be *underaligned*. fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef { - const target = self.getTarget(); - const layout = ty.unionGetLayout(target); + const mod = self.module; + const layout = ty.unionGetLayout(mod); const union_ty = ty.cast(Type.Payload.Union).?.data; if (union_ty.layout == .Packed) { @@ -1143,11 +1146,11 @@ pub const DeclGen = struct { const active_field = maybe_active_field orelse layout.most_aligned_field; const active_field_ty = union_ty.fields.values()[active_field].ty; - const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: { + const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { const active_payload_ty_ref = try self.resolveType(active_field_ty, .indirect); member_types.appendAssumeCapacity(active_payload_ty_ref); member_names.appendAssumeCapacity(try self.spv.resolveString("payload")); - break :blk active_field_ty.abiSize(target); + break :blk active_field_ty.abiSize(mod); } else 0; const payload_padding_len = layout.payload_size - active_field_size; @@ -1177,21 +1180,21 @@ pub const DeclGen = struct { /// Turn a Zig type into a SPIR-V Type, and return a reference to it. 
fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef { + const mod = self.module; log.debug("resolveType: ty = {}", .{ty.fmt(self.module)}); const target = self.getTarget(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return try self.spv.resolve(.void_type), .Bool => switch (repr) { .direct => return try self.spv.resolve(.bool_type), .indirect => return try self.intType(.unsigned, 1), }, .Int => { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { - var buffer: Type.Payload.Bits = undefined; - const tag_ty = ty.intTagType(&buffer); + const tag_ty = ty.intTagType(); return self.resolveType(tag_ty, repr); }, .Float => { @@ -1290,7 +1293,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field_ty, .indirect); member_index += 1; @@ -1315,7 +1318,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (struct_ty.fields.values(), 0..) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field.ty, .indirect); member_names[member_index] = try self.spv.resolveString(struct_ty.fields.keys()[i]); @@ -1334,7 +1337,7 @@ pub const DeclGen = struct { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // Just use a bool. // Note: Always generate the bool with indirect format, to save on some sanity // Perform the conversion to a direct bool when the field is extracted. @@ -1342,7 +1345,7 @@ pub const DeclGen = struct { } const payload_ty_ref = try self.resolveType(payload_ty, .indirect); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { // Optional is actually a pointer or a slice. 
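// For example: `?*u32` resolves to the same SPIR-V type as `*u32`, with null
// encoded as the zero-valued pointer; only optionals without this payload
// representation get the { payload, bool } struct layout built below.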
return payload_ty_ref; } @@ -1445,14 +1448,14 @@ pub const DeclGen = struct { }; fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout { - const target = self.getTarget(); + const mod = self.module; - const error_align = Type.anyerror.abiAlignment(target); - const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(mod); + const payload_align = payload_ty.abiAlignment(mod); const error_first = error_align > payload_align; return .{ - .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(), + .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod), .error_first = error_first, }; } @@ -1529,14 +1532,15 @@ pub const DeclGen = struct { } fn genDecl(self: *DeclGen) !void { - const decl = self.module.declPtr(self.decl_index); + const mod = self.module; + const decl = mod.declPtr(self.decl_index); const spv_decl_index = try self.resolveDecl(self.decl_index); const decl_id = self.spv.declPtr(spv_decl_index).result_id; log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); if (decl.val.castTag(.function)) |_| { - assert(decl.ty.zigTypeTag() == .Fn); + assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()), @@ -1634,7 +1638,8 @@ pub const DeclGen = struct { /// Convert representation from indirect (in memory) to direct (in 'register') /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct). fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - return switch (ty.zigTypeTag()) { + const mod = self.module; + return switch (ty.zigTypeTag(mod)) { .Bool => blk: { const direct_bool_ty_ref = try self.resolveType(ty, .direct); const indirect_bool_ty_ref = try self.resolveType(ty, .indirect); @@ -1655,7 +1660,8 @@ pub const DeclGen = struct { /// Convert representation from direct (in 'register) to direct (in memory) /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect). 
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - return switch (ty.zigTypeTag()) { + const mod = self.module; + return switch (ty.zigTypeTag(mod)) { .Bool => blk: { const indirect_bool_ty_ref = try self.resolveType(ty, .indirect); break :blk self.boolToInt(indirect_bool_ty_ref, operand_id); @@ -2056,6 +2062,7 @@ pub const DeclGen = struct { } fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; if (self.liveness.isUnused(inst)) return null; const ty = self.air.typeOfIndex(inst); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -2083,7 +2090,7 @@ pub const DeclGen = struct { if (elem.isUndef()) { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { - const int = elem.toSignedInt(self.getTarget()); + const int = elem.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); self.func.body.writeOperand(spec.LiteralInteger, unsigned); } @@ -2189,13 +2196,13 @@ pub const DeclGen = struct { lhs_id: IdRef, rhs_id: IdRef, ) !IdRef { + const mod = self.module; var cmp_lhs_id = lhs_id; var cmp_rhs_id = rhs_id; const opcode: Opcode = opcode: { - var int_buffer: Type.Payload.Bits = undefined; - const op_ty = switch (ty.zigTypeTag()) { + const op_ty = switch (ty.zigTypeTag(mod)) { .Int, .Bool, .Float => ty, - .Enum => ty.intTagType(&int_buffer), + .Enum => ty.intTagType(), .ErrorSet => Type.u16, .Pointer => blk: { // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are @@ -2303,13 +2310,14 @@ pub const DeclGen = struct { src_ty: Type, src_id: IdRef, ) !IdRef { + const mod = self.module; const dst_ty_ref = try self.resolveType(dst_ty, .direct); const result_id = self.spv.allocId(); // TODO: Some more cases are missing here // See fn bitCast in llvm.zig - if (src_ty.zigTypeTag() == .Int and dst_ty.isPtrAtRuntime()) { + if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) { try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ .id_result_type = self.typeId(dst_ty_ref), .id_result = result_id, @@ -2342,8 +2350,8 @@ pub const DeclGen = struct { const dest_ty = self.air.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); - const target = self.getTarget(); - const dest_info = dest_ty.intInfo(target); + const mod = self.module; + const dest_info = dest_ty.intInfo(mod); // TODO: Masking? @@ -2485,8 +2493,9 @@ pub const DeclGen = struct { } fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { + const mod = self.module; // Construct new pointer type for the resulting pointer - const elem_ty = ptr_ty.elemType2(); // use elemType() so that we get T for *[N]T. + const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. 
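// Illustration: for `ptr: *[4]u32`, elemType2 yields `u32`, so the element
// pointer type built below is a pointer to u32; for `ptr: [*]u32` the element
// type is the same and only the indexing differs (see the isSinglePointer
// branch).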
const elem_ty_ref = try self.resolveType(elem_ty, .direct); const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); if (ptr_ty.isSinglePointer()) { @@ -2502,12 +2511,13 @@ pub const DeclGen = struct { fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.air.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); // TODO: Make this return a null ptr or something - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2536,8 +2546,8 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const un_ty = self.air.typeOf(ty_op.operand); - const target = self.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const mod = self.module; + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_handle = try self.resolve(ty_op.operand); @@ -2551,6 +2561,7 @@ pub const DeclGen = struct { fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -2559,9 +2570,9 @@ pub const DeclGen = struct { const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - assert(struct_ty.zigTypeTag() == .Struct); // Cannot do unions yet. + assert(struct_ty.zigTypeTag(mod) == .Struct); // Cannot do unions yet. return try self.extractField(field_ty, object_id, field_index); } @@ -2573,8 +2584,9 @@ pub const DeclGen = struct { object_ptr: IdRef, field_index: u32, ) !?IdRef { + const mod = self.module; const object_ty = object_ptr_ty.childType(); - switch (object_ty.zigTypeTag()) { + switch (object_ty.zigTypeTag(mod)) { .Struct => switch (object_ty.containerLayout()) { .Packed => unreachable, // TODO else => { @@ -2667,6 +2679,7 @@ pub const DeclGen = struct { // the current block by first generating the code of the block, then a label, and then generate the rest of the current // ir.Block in a different SPIR-V block. + const mod = self.module; const label_id = self.spv.allocId(); // 4 chosen as arbitrary initial capacity. @@ -2690,7 +2703,7 @@ pub const DeclGen = struct { try self.beginSpvBlock(label_id); // If this block didn't produce a value, simply return here. - if (!ty.hasRuntimeBitsIgnoreComptime()) + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return null; // Combine the result from the blocks using the Phi instruction. @@ -2716,7 +2729,8 @@ pub const DeclGen = struct { const block = self.blocks.get(br.block_inst).?; const operand_ty = self.air.typeOf(br.operand); - if (operand_ty.hasRuntimeBits()) { + const mod = self.module; + if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body. 
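// The (value, parent-label) pairs collected here feed a single OpPhi once the
// block is sealed, roughly:
//
//   %result = OpPhi %block_ty %break_value_0 %src_label_0 %break_value_1 %src_label_1 ...
//
// with one pair per recorded incoming edge.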
try block.incoming_blocks.append(self.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); @@ -2771,13 +2785,14 @@ pub const DeclGen = struct { } fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false; + const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); @@ -2805,7 +2820,8 @@ pub const DeclGen = struct { fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { const operand = self.air.instructions.items(.data)[inst].un_op; const operand_ty = self.air.typeOf(operand); - if (operand_ty.hasRuntimeBits()) { + const mod = self.module; + if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(operand); try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id }); } else { @@ -2814,11 +2830,12 @@ pub const DeclGen = struct { } fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.air.typeOf(un_op); const ret_ty = ptr_ty.childType(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { try self.func.body.emit(self.spv.gpa, .OpReturn, {}); return; } @@ -2946,6 +2963,7 @@ pub const DeclGen = struct { fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_null, is_non_null }) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_id = try self.resolve(un_op); const optional_ty = self.air.typeOf(un_op); @@ -2955,7 +2973,7 @@ pub const DeclGen = struct { const bool_ty_ref = try self.resolveType(Type.bool, .direct); - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { // Pointer payload represents nullability: pointer or slice. 
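// Conceptually, for a pointer-like optional the check lowers to comparing the
// payload pointer (or a slice's ptr field) against the zero/null pointer,
// e.g. is_null(%p) -> %p == null; this is a sketch of intent rather than the
// exact instruction sequence emitted below.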
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -2985,7 +3003,7 @@ pub const DeclGen = struct { return result_id; } - const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime()) + const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime(mod)) try self.extractField(Type.bool, operand_id, 1) else // Optional representation is bool indicating whether the optional is set @@ -3009,14 +3027,15 @@ pub const DeclGen = struct { fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.air.typeOf(ty_op.operand); const payload_ty = self.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; } @@ -3026,16 +3045,17 @@ pub const DeclGen = struct { fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try self.constBool(true, .direct); } const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.air.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; } @@ -3045,30 +3065,29 @@ pub const DeclGen = struct { } fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void { - const target = self.getTarget(); + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolve(pl_op.operand); const cond_ty = self.air.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); - const cond_words: u32 = switch (cond_ty.zigTypeTag()) { + const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) { .Int => blk: { - const bits = cond_ty.intInfo(target).bits; + const bits = cond_ty.intInfo(mod).bits; const backing_bits = self.backingIntBits(bits) orelse { return self.todo("implement composite int switch", .{}); }; break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, .Enum => blk: { - var buffer: Type.Payload.Bits = undefined; - const int_ty = cond_ty.intTagType(&buffer); - const int_info = int_ty.intInfo(target); + const int_ty = cond_ty.intTagType(); + const int_info = int_ty.intInfo(mod); const backing_bits = self.backingIntBits(int_info.bits) orelse { return self.todo("implement composite int switch", .{}); }; break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, - else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag())}), // TODO: Figure out which types apply here, and work around them as we can only do integers. + else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them as we can only do integers. 
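// Worked example: a u8 or i32 selector has backing_bits <= 32, so each
// OpSwitch case literal occupies one 32-bit word; a u64 selector needs two
// words per literal (low-order word first, per the SPIR-V rules for literals
// wider than 32 bits).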
}; const num_cases = switch_br.data.cases_len; @@ -3112,15 +3131,15 @@ pub const DeclGen = struct { const label = IdRef{ .id = first_case_label.id + case_i }; for (items) |item| { - const value = self.air.value(item) orelse { + const value = self.air.value(item, mod) orelse { return self.todo("switch on runtime value???", .{}); }; - const int_val = switch (cond_ty.zigTypeTag()) { - .Int => if (cond_ty.isSignedInt()) @bitCast(u64, value.toSignedInt(target)) else value.toUnsignedInt(target), + const int_val = switch (cond_ty.zigTypeTag(mod)) { + .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod), .Enum => blk: { var int_buffer: Value.Payload.U64 = undefined; // TODO: figure out of cond_ty is correct (something with enum literals) - break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(target); // TODO: composite integer constants + break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(mod); // TODO: composite integer constants }, else => unreachable, }; @@ -3294,11 +3313,12 @@ pub const DeclGen = struct { fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef { _ = modifier; + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const callee_ty = self.air.typeOf(pl_op.operand); - const zig_fn_ty = switch (callee_ty.zigTypeTag()) { + const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => return self.fail("cannot call function pointers", .{}), else => unreachable, @@ -3320,7 +3340,7 @@ pub const DeclGen = struct { // temporary params buffer. const arg_id = try self.resolve(arg); const arg_ty = self.air.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; params[n_params] = arg_id; n_params += 1; @@ -3337,7 +3357,7 @@ pub const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) { + if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { return null; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 62a208406e65..6117f1c1de13 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1123,7 +1123,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In }, }; - const required_alignment = tv.ty.abiAlignment(self.base.options.target); + const required_alignment = tv.ty.abiAlignment(mod); const atom = self.getAtomPtr(atom_index); atom.size = @intCast(u32, code.len); atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment); @@ -1299,7 +1299,8 @@ pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom. 
fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { const decl = self.base.options.module.?.declPtr(decl_index); const ty = decl.ty; - const zig_ty = ty.zigTypeTag(); + const mod = self.base.options.module.?; + const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const index: u16 = blk: { if (val.isUndefDeep()) { @@ -1330,7 +1331,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple defer gpa.free(decl_name); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 1d358a29ab61..9c6e54ea9846 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -169,16 +169,16 @@ pub const DeclState = struct { fn addDbgInfoType( self: *DeclState, - module: *Module, + mod: *Module, atom_index: Atom.Index, ty: Type, ) error{OutOfMemory}!void { const arena = self.abbrev_type_arena.allocator(); const dbg_info_buffer = &self.dbg_info; - const target = module.getTarget(); + const target = mod.getTarget(); const target_endian = target.cpu.arch.endian(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .NoReturn => unreachable, .Void => { try dbg_info_buffer.append(@enumToInt(AbbrevKind.pad1)); @@ -189,12 +189,12 @@ pub const DeclState = struct { // DW.AT.encoding, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); }, .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); try dbg_info_buffer.ensureUnusedCapacity(12); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type)); // DW.AT.encoding, DW.FORM.data1 @@ -203,20 +203,20 @@ pub const DeclState = struct { .unsigned => DW.ATE.unsigned, }); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { try dbg_info_buffer.ensureUnusedCapacity(12); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type)); // DW.AT.encoding, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(DW.ATE.address); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); } else { // Non-pointer optionals are structs: struct { .maybe = *, .val = * } var buf = try arena.create(Type.Payload.ElemType); @@ -224,10 +224,10 @@ pub const DeclState = struct { // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - const abi_size = ty.abiSize(target); + const 
abi_size = ty.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(7); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -251,7 +251,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const offset = abi_size - payload_ty.abiSize(target); + const offset = abi_size - payload_ty.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), offset); // DW.AT.structure_type delimit children try dbg_info_buffer.append(0); @@ -266,9 +266,9 @@ pub const DeclState = struct { try dbg_info_buffer.ensureUnusedCapacity(2); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(5); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -311,7 +311,7 @@ pub const DeclState = struct { // DW.AT.array_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_type)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); @@ -332,12 +332,12 @@ pub const DeclState = struct { // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); switch (ty.tag()) { .tuple, .anon_struct => { // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); const fields = ty.tupleFields(); for (fields.types, 0..) |field, field_index| { @@ -350,13 +350,13 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const field_off = ty.structFieldOffset(field_index, target); + const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, else => { // DW.AT.name, DW.FORM.string - const struct_name = try ty.nameAllocArena(arena, module); + const struct_name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendAssumeCapacity(0); @@ -370,7 +370,7 @@ pub const DeclState = struct { const fields = ty.structFields(); for (fields.keys(), 0..) 
|field_name, field_index| { const field = fields.get(field_name).?; - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -382,7 +382,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const field_off = ty.structFieldOffset(field_index, target); + const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, @@ -395,9 +395,9 @@ pub const DeclState = struct { // DW.AT.enumeration_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - const enum_name = try ty.nameAllocArena(arena, module); + const enum_name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(enum_name); dbg_info_buffer.appendAssumeCapacity(0); @@ -424,7 +424,7 @@ pub const DeclState = struct { // See https://github.com/ziglang/zig/issues/645 var int_buffer: Value.Payload.U64 = undefined; const field_int_val = value.enumToInt(ty, &int_buffer); - break :value @bitCast(u64, field_int_val.toSignedInt(target)); + break :value @bitCast(u64, field_int_val.toSignedInt(mod)); } else @intCast(u64, field_i); mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); } @@ -433,12 +433,12 @@ pub const DeclState = struct { try dbg_info_buffer.append(0); }, .Union => { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); const union_obj = ty.cast(Type.Payload.Union).?.data; const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0; const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size; const is_tagged = layout.tag_size > 0; - const union_name = try ty.nameAllocArena(arena, module); + const union_name = try ty.nameAllocArena(arena, mod); // TODO this is temporary to match current state of unions in Zig - we don't yet have // safety checks implemented meaning the implicit tag is not yet stored and generated @@ -481,7 +481,7 @@ pub const DeclState = struct { const fields = ty.unionFields(); for (fields.keys()) |field_name| { const field = fields.get(field_name).?; - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; // DW.AT.member try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member)); // DW.AT.name, DW.FORM.string @@ -517,7 +517,7 @@ pub const DeclState = struct { .ErrorSet => { try addDbgInfoErrorSet( self.abbrev_type_arena.allocator(), - module, + mod, ty, target, &self.dbg_info, @@ -526,18 +526,18 @@ pub const DeclState = struct { .ErrorUnion => { const error_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); - const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - const abi_size = ty.abiSize(target); - const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(target) else 0; - const error_off = if (error_align >= payload_align) 0 
else payload_ty.abiSize(target); + const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_size = ty.abiSize(mod); + const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0; + const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod); // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const name = try ty.nameAllocArena(arena, module); + const name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.writer().print("{s}\x00", .{name}); if (!payload_ty.isNoReturn()) { @@ -685,7 +685,8 @@ pub const DeclState = struct { const atom_index = self.di_atom_decls.get(owner_decl).?; const name_with_null = name.ptr[0 .. name.len + 1]; try dbg_info.append(@enumToInt(AbbrevKind.variable)); - const target = self.mod.getTarget(); + const mod = self.mod; + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); const child_ty = if (is_ptr) ty.childType() else ty; @@ -790,9 +791,9 @@ pub const DeclState = struct { const fixup = dbg_info.items.len; dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc 1, - if (child_ty.isSignedInt()) DW.OP.consts else DW.OP.constu, + if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu, }); - if (child_ty.isSignedInt()) { + if (child_ty.isSignedInt(mod)) { try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x)); } else { try leb128.writeULEB128(dbg_info.writer(), x); @@ -805,7 +806,7 @@ pub const DeclState = struct { // DW.AT.location, DW.FORM.exprloc // uleb128(exprloc_len) // DW.OP.implicit_value uleb128(len_of_bytes) bytes - const abi_size = @intCast(u32, child_ty.abiSize(target)); + const abi_size = @intCast(u32, child_ty.abiSize(mod)); var implicit_value_len = std.ArrayList(u8).init(self.gpa); defer implicit_value_len.deinit(); try leb128.writeULEB128(implicit_value_len.writer(), abi_size); @@ -979,7 +980,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) assert(decl.has_tv); - switch (decl.ty.zigTypeTag()) { + switch (decl.ty.zigTypeTag(mod)) { .Fn => { _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index); @@ -1027,7 +1028,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len); const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(); + const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod); if (fn_ret_has_bits) { dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram)); } else { @@ -1059,7 +1060,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) pub fn commitDeclState( self: *Dwarf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, sym_addr: u64, sym_size: u64, @@ -1071,12 +1072,12 @@ pub fn commitDeclState( const gpa = self.allocator; var dbg_line_buffer = &decl_state.dbg_line; var dbg_info_buffer = &decl_state.dbg_info; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const target_endian = self.target.cpu.arch.endian(); assert(decl.has_tv); - switch (decl.ty.zigTypeTag()) { + switch (decl.ty.zigTypeTag(mod)) { .Fn => { // Since the Decl is a function, we need to update the .debug_line program. 
// Perform the relocations based on vaddr. @@ -1283,7 +1284,7 @@ pub fn commitDeclState( if (deferred) continue; symbol.offset = @intCast(u32, dbg_info_buffer.items.len); - try decl_state.addDbgInfoType(module, di_atom_index, ty); + try decl_state.addDbgInfoType(mod, di_atom_index, ty); } } @@ -1319,7 +1320,7 @@ pub fn commitDeclState( reloc.offset, value, target, - ty.fmt(module), + ty.fmt(mod), }); mem.writeInt( u32, @@ -2663,7 +2664,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct { fn addDbgInfoErrorSet( arena: Allocator, - module: *Module, + mod: *Module, ty: Type, target: std.Target, dbg_info_buffer: *std.ArrayList(u8), @@ -2673,10 +2674,10 @@ fn addDbgInfoErrorSet( // DW.AT.enumeration_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type)); // DW.AT.byte_size, DW.FORM.udata - const abi_size = Type.anyerror.abiSize(target); + const abi_size = Type.anyerror.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const name = try ty.nameAllocArena(arena, module); + const name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.writer().print("{s}\x00", .{name}); // DW.AT.enumerator @@ -2691,7 +2692,7 @@ fn addDbgInfoErrorSet( const error_names = ty.errorSetNames(); for (error_names) |error_name| { - const kv = module.getErrorValue(error_name) catch unreachable; + const kv = mod.getErrorValue(error_name) catch unreachable; // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64)); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant)); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 2a28f880ac48..7bd36a9b6091 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2449,9 +2449,10 @@ pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.I } fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { - const decl = self.base.options.module.?.declPtr(decl_index); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); const ty = decl.ty; - const zig_ty = ty.zigTypeTag(); + const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const shdr_index: u16 = blk: { if (val.isUndefDeep()) { @@ -2482,7 +2483,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s defer self.base.allocator.free(decl_name); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; @@ -2826,7 +2827,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module }, }; - const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const required_alignment = typed_value.ty.abiAlignment(mod); const shdr_index = self.rodata_section_index.?; const phdr_index = self.sections.items(.phdr_index)[shdr_index]; const local_sym = self.getAtom(atom_index).getSymbolPtr(self); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index a346ec756fee..306661c5c58e 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1948,7 +1948,8 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu }, }; - const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const mod = self.base.options.module.?; + const required_alignment = typed_value.ty.abiAlignment(mod); const atom = 
self.getAtomPtr(atom_index); atom.size = code.len; // TODO: work out logic for disambiguating functions from function pointers @@ -2152,6 +2153,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In } fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { + const mod = self.base.options.module.?; // Lowering a TLV on macOS involves two stages: // 1. first we lower the initializer into appopriate section (__thread_data or __thread_bss) // 2. next, we create a corresponding threadlocal variable descriptor in __thread_vars @@ -2202,7 +2204,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D }, }; - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_name = try decl.getFullyQualifiedName(module); defer gpa.free(decl_name); @@ -2262,7 +2264,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { const decl = self.base.options.module.?.declPtr(decl_index); const ty = decl.ty; const val = decl.val; - const zig_ty = ty.zigTypeTag(); + const mod = self.base.options.module.?; + const zig_ty = ty.zigTypeTag(mod); const mode = self.base.options.optimize_mode; const single_threaded = self.base.options.single_threaded; const sect_id: u8 = blk: { @@ -2301,7 +2304,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64 const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_name = try decl.getFullyQualifiedName(mod); defer gpa.free(decl_name); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 6d74e17dfd44..7a389a789d9b 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -432,8 +432,9 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) } /// called at the end of update{Decl,Func} fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { - const decl = self.base.options.module.?.declPtr(decl_index); - const is_fn = (decl.ty.zigTypeTag() == .Fn); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + const is_fn = (decl.ty.zigTypeTag(mod) == .Fn); log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); const sym_t: aout.Sym.Type = if (is_fn) .t else .d; @@ -704,7 +705,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No log.debug("relocating the address of '{s}' + {d} into '{s}' + {d}", .{ target_decl.name, addend, source_decl.name, offset }); const code = blk: { - const is_fn = source_decl.ty.zigTypeTag() == .Fn; + const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; if (is_fn) { const table = self.fn_decl_table.get(source_decl.getFileScope()).?.functions; const output = table.get(source_decl_index).?; @@ -1031,7 +1032,7 @@ pub fn getDeclVAddr( ) !u64 { const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { var start = self.bases.text; var it_file = self.fn_decl_table.iterator(); while (it_file.next()) |fentry| { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index befd2d68c91e..0154207368e0 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1473,7 +1473,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 atom.size = 
@intCast(u32, code.len); if (code.len == 0) return; - atom.alignment = decl.ty.abiAlignment(wasm.base.options.target); + atom.alignment = decl.ty.abiAlignment(mod); } /// From a given symbol location, returns its `wasm.GlobalType`. @@ -1523,9 +1523,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type { /// Returns the symbol index of the local /// The given `decl` is the parent decl whom owns the constant. pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { - assert(tv.ty.zigTypeTag() != .Fn); // cannot create local symbols for functions - const mod = wasm.base.options.module.?; + assert(tv.ty.zigTypeTag(mod) != .Fn); // cannot create local symbols for functions const decl = mod.declPtr(decl_index); // Create and initialize a new local symbol and atom @@ -1543,7 +1542,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const code = code: { const atom = wasm.getAtomPtr(atom_index); - atom.alignment = tv.ty.abiAlignment(wasm.base.options.target); + atom.alignment = tv.ty.abiAlignment(mod); wasm.symbols.items[atom.sym_index] = .{ .name = try wasm.string_table.put(wasm.base.allocator, name), .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL), @@ -1632,7 +1631,7 @@ pub fn getDeclVAddr( const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?; const atom = wasm.getAtomPtr(atom_index); const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { assert(reloc_info.addend == 0); // addend not allowed for function relocations // We found a function pointer, so add it to our table, // as function pointers are not allowed to be stored inside the data section. 
@@ -2933,7 +2932,8 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - atom.alignment = slice_ty.abiAlignment(wasm.base.options.target); + const mod = wasm.base.options.module.?; + atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table"); @@ -3000,7 +3000,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { .offset = offset, .addend = @intCast(i32, addend), }); - atom.size += @intCast(u32, slice_ty.abiSize(wasm.base.options.target)); + atom.size += @intCast(u32, slice_ty.abiSize(mod)); addend += len; // as we updated the error name table, we now store the actual name within the names atom @@ -3369,7 +3369,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod if (decl.isExtern()) continue; const atom_index = entry.value_ptr.*; const atom = wasm.getAtomPtr(atom_index); - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); } else if (decl.getVariable()) |variable| { if (!variable.is_mutable) { diff --git a/src/print_air.zig b/src/print_air.zig index 2e8ab1a6422d..e8875ff01834 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -7,6 +7,7 @@ const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); +const InternPool = @import("InternPool.zig"); pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) void { const instruction_bytes = air.instructions.len * @@ -965,14 +966,13 @@ const Writer = struct { operand: Air.Inst.Ref, dies: bool, ) @TypeOf(s).Error!void { - var i: usize = @enumToInt(operand); + const i = @enumToInt(operand); - if (i < Air.Inst.Ref.typed_value_map.len) { + if (i < InternPool.static_len) { return s.print("@{}", .{operand}); } - i -= Air.Inst.Ref.typed_value_map.len; - return w.writeInstIndex(s, @intCast(Air.Inst.Index, i), dies); + return w.writeInstIndex(s, i - InternPool.static_len, dies); } fn writeInstIndex( diff --git a/src/print_zir.zig b/src/print_zir.zig index cfa68424d0bb..a2178bbb49fb 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -3,6 +3,7 @@ const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; const Ast = std.zig.Ast; +const InternPool = @import("InternPool.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); @@ -2468,14 +2469,9 @@ const Writer = struct { } fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void { - var i: usize = @enumToInt(ref); - - if (i < Zir.Inst.Ref.typed_value_map.len) { - return stream.print("@{}", .{ref}); - } - i -= Zir.Inst.Ref.typed_value_map.len; - - return self.writeInstIndex(stream, @intCast(Zir.Inst.Index, i)); + const i = @enumToInt(ref); + if (i < InternPool.static_len) return stream.print("@{}", .{@intToEnum(InternPool.Index, i)}); + return self.writeInstIndex(stream, i - InternPool.static_len); } fn writeInstIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { diff --git a/src/target.zig b/src/target.zig index 5e66c8f41771..c89f8ce92ccf 100644 --- a/src/target.zig +++ b/src/target.zig @@ -512,134 +512,6 @@ pub fn needUnwindTables(target: std.Target) bool { return target.os.tag == .windows; } -pub const AtomicPtrAlignmentError = error{ - FloatTooBig, - 
IntTooBig, - BadType, -}; - -pub const AtomicPtrAlignmentDiagnostics = struct { - bits: u16 = undefined, - max_bits: u16 = undefined, -}; - -/// If ABI alignment of `ty` is OK for atomic operations, returns 0. -/// Otherwise returns the alignment required on a pointer for the target -/// to perform atomic operations. -// TODO this function does not take into account CPU features, which can affect -// this value. Audit this! -pub fn atomicPtrAlignment( - target: std.Target, - ty: Type, - diags: *AtomicPtrAlignmentDiagnostics, -) AtomicPtrAlignmentError!u32 { - const max_atomic_bits: u16 = switch (target.cpu.arch) { - .avr, - .msp430, - .spu_2, - => 16, - - .arc, - .arm, - .armeb, - .hexagon, - .m68k, - .le32, - .mips, - .mipsel, - .nvptx, - .powerpc, - .powerpcle, - .r600, - .riscv32, - .sparc, - .sparcel, - .tce, - .tcele, - .thumb, - .thumbeb, - .x86, - .xcore, - .amdil, - .hsail, - .spir, - .kalimba, - .lanai, - .shave, - .wasm32, - .renderscript32, - .csky, - .spirv32, - .dxil, - .loongarch32, - .xtensa, - => 32, - - .amdgcn, - .bpfel, - .bpfeb, - .le64, - .mips64, - .mips64el, - .nvptx64, - .powerpc64, - .powerpc64le, - .riscv64, - .sparc64, - .s390x, - .amdil64, - .hsail64, - .spir64, - .wasm64, - .renderscript64, - .ve, - .spirv64, - .loongarch64, - => 64, - - .aarch64, - .aarch64_be, - .aarch64_32, - => 128, - - .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, - }; - - var buffer: Type.Payload.Bits = undefined; - - const int_ty = switch (ty.zigTypeTag()) { - .Int => ty, - .Enum => ty.intTagType(&buffer), - .Float => { - const bit_count = ty.floatBits(target); - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.FloatTooBig; - } - return 0; - }, - .Bool => return 0, - else => { - if (ty.isPtrAtRuntime()) return 0; - return error.BadType; - }, - }; - - const bit_count = int_ty.intInfo(target).bits; - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.IntTooBig; - } - - return 0; -} - pub fn defaultAddressSpace( target: std.Target, context: enum { diff --git a/src/type.zig b/src/type.zig index e5b41e717bad..259079a26c14 100644 --- a/src/type.zig +++ b/src/type.zig @@ -9,27 +9,102 @@ const log = std.log.scoped(.Type); const target_util = @import("target.zig"); const TypedValue = @import("TypedValue.zig"); const Sema = @import("Sema.zig"); +const InternPool = @import("InternPool.zig"); const file_struct = @This(); -/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication. -/// It's important for this type to be small. -/// Types are not de-duplicated, which helps with multi-threading since it obviates the requirement -/// of obtaining a lock on a global type table, as well as making the -/// garbage collection bookkeeping simpler. -/// This union takes advantage of the fact that the first page of memory -/// is unmapped, giving us 4096 possible enum tags that have no payload. -pub const Type = extern union { - /// If the tag value is less than Tag.no_payload_count, then no pointer - /// dereference is needed. - tag_if_small_enough: Tag, - ptr_otherwise: *Payload, - - pub fn zigTypeTag(ty: Type) std.builtin.TypeId { - return ty.zigTypeTagOrPoison() catch unreachable; - } +pub const Type = struct { + /// We are migrating towards using this for every Type object. However, many + /// types are still represented the legacy way. This is indicated by using + /// InternPool.Index.none. 
+ ip_index: InternPool.Index, + + /// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication. + /// This union takes advantage of the fact that the first page of memory + /// is unmapped, giving us 4096 possible enum tags that have no payload. + legacy: extern union { + /// If the tag value is less than Tag.no_payload_count, then no pointer + /// dereference is needed. + tag_if_small_enough: Tag, + ptr_otherwise: *Payload, + }, + + pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { + return ty.zigTypeTagOrPoison(mod) catch unreachable; + } + + pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { + if (ty.ip_index != .none) { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return .Int, + .ptr_type => return .Pointer, + .array_type => return .Array, + .vector_type => return .Vector, + .optional_type => return .Optional, + .error_union_type => return .ErrorUnion, + .struct_type => return .Struct, + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + => return .Float, + + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + => return .Int, + + .anyopaque => return .Opaque, + .bool => return .Bool, + .void => return .Void, + .type => return .Type, + .anyerror => return .ErrorSet, + .comptime_int => return .ComptimeInt, + .comptime_float => return .ComptimeFloat, + .noreturn => return .NoReturn, + .@"anyframe" => return .AnyFrame, + .null => return .Null, + .undefined => return .Undefined, + .enum_literal => return .EnumLiteral, + + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + => return .Enum, + + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => return .Struct, + + .type_info => return .Union, + + .generic_poison => unreachable, + .var_args_param => unreachable, + }, - pub fn zigTypeTagOrPoison(ty: Type) error{GenericPoison}!std.builtin.TypeId { + .extern_func, + .int, + .enum_tag, + .simple_value, + => unreachable, // it's a value, not a type + } + } switch (ty.tag()) { .generic_poison => return error.GenericPoison, @@ -56,8 +131,6 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, - .int_signed, - .int_unsigned, => return .Int, .f16, @@ -85,10 +158,6 @@ pub const Type = extern union { .null => return .Null, .undefined => return .Undefined, - .fn_noreturn_no_args => return .Fn, - .fn_void_no_args => return .Fn, - .fn_naked_noreturn_no_args => return .Fn, - .fn_ccc_void_no_args => return .Fn, .function => return .Fn, .array, @@ -159,26 +228,26 @@ pub const Type = extern union { } } - pub fn baseZigTypeTag(self: Type) std.builtin.TypeId { - return switch (self.zigTypeTag()) { - .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(), + pub fn baseZigTypeTag(self: Type, mod: *const Module) std.builtin.TypeId { + return switch (self.zigTypeTag(mod)) { + .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod), .Optional => { var buf: Payload.ElemType = undefined; - return self.optionalChild(&buf).baseZigTypeTag(); + return self.optionalChild(&buf).baseZigTypeTag(mod); }, else => |t| t, }; } - pub fn isSelfComparable(ty: Type, is_equality_cmp: bool) bool { - return switch (ty.zigTypeTag()) { + pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { + return switch (ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, => 
true, - .Vector => ty.elemType2().isSelfComparable(is_equality_cmp), + .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), .Bool, .Type, @@ -205,44 +274,54 @@ pub const Type = extern union { .Optional => { if (!is_equality_cmp) return false; var buf: Payload.ElemType = undefined; - return ty.optionalChild(&buf).isSelfComparable(is_equality_cmp); + return ty.optionalChild(&buf).isSelfComparable(mod, is_equality_cmp); }, }; } pub fn initTag(comptime small_tag: Tag) Type { comptime assert(@enumToInt(small_tag) < Tag.no_payload_count); - return .{ .tag_if_small_enough = small_tag }; + return Type{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = small_tag }, + }; } pub fn initPayload(payload: *Payload) Type { assert(@enumToInt(payload.tag) >= Tag.no_payload_count); - return .{ .ptr_otherwise = payload }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = payload }, + }; } - pub fn tag(self: Type) Tag { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return self.tag_if_small_enough; + pub fn tag(ty: Type) Tag { + assert(ty.ip_index == .none); + if (@enumToInt(ty.legacy.tag_if_small_enough) < Tag.no_payload_count) { + return ty.legacy.tag_if_small_enough; } else { - return self.ptr_otherwise.tag; + return ty.legacy.ptr_otherwise.tag; } } /// Prefer `castTag` to this. pub fn cast(self: Type, comptime T: type) ?*T { + if (self.ip_index != .none) { + return null; + } if (@hasField(T, "base_tag")) { return self.castTag(T.base_tag); } - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { return null; } inline for (@typeInfo(Tag).Enum.fields) |field| { if (field.value < Tag.no_payload_count) continue; const t = @intToEnum(Tag, field.value); - if (self.ptr_otherwise.tag == t) { + if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { - return @fieldParentPtr(T, "base", self.ptr_otherwise); + return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); } return null; } @@ -251,11 +330,14 @@ pub const Type = extern union { } pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) + if (self.ip_index != .none) { + return null; + } + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; - if (self.ptr_otherwise.tag == t) - return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise); + if (self.legacy.ptr_otherwise.tag == t) + return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); return null; } @@ -285,10 +367,10 @@ pub const Type = extern union { } /// If it is a function pointer, returns the function type. Otherwise returns null. - pub fn castPtrToFn(ty: Type) ?Type { - if (ty.zigTypeTag() != .Pointer) return null; + pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { + if (ty.zigTypeTag(mod) != .Pointer) return null; const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() != .Fn) return null; + if (elem_ty.zigTypeTag(mod) != .Fn) return null; return elem_ty; } @@ -536,7 +618,10 @@ pub const Type = extern union { pub fn eql(a: Type, b: Type, mod: *Module) bool { // As a shortcut, if the small tags / addresses match, we're done. 
- if (a.tag_if_small_enough == b.tag_if_small_enough) return true; + if (a.ip_index != .none or b.ip_index != .none) { + return a.ip_index == b.ip_index; + } + if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { .generic_poison => unreachable, @@ -589,16 +674,11 @@ pub const Type = extern union { .i64, .u128, .i128, - .int_signed, - .int_unsigned, => { - if (b.zigTypeTag() != .Int) return false; + if (b.zigTypeTag(mod) != .Int) return false; if (b.isNamedInt()) return false; - - // Arbitrary sized integers. The target will not be branched upon, - // because we handled target-dependent cases above. - const info_a = a.intInfo(@as(Target, undefined)); - const info_b = b.intInfo(@as(Target, undefined)); + const info_a = a.intInfo(mod); + const info_b = b.intInfo(mod); return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits; }, @@ -641,13 +721,8 @@ pub const Type = extern union { return opaque_obj_a == opaque_obj_b; }, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => { - if (b.zigTypeTag() != .Fn) return false; + .function => { + if (b.zigTypeTag(mod) != .Fn) return false; const a_info = a.fnInfo(); const b_info = b.fnInfo(); @@ -699,7 +774,7 @@ pub const Type = extern union { .array_sentinel, .vector, => { - if (a.zigTypeTag() != b.zigTypeTag()) return false; + if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false; if (a.arrayLen() != b.arrayLen()) return false; @@ -737,7 +812,7 @@ pub const Type = extern union { .manyptr_const_u8, .manyptr_const_u8_sentinel_0, => { - if (b.zigTypeTag() != .Pointer) return false; + if (b.zigTypeTag(mod) != .Pointer) return false; const info_a = a.ptrInfo().data; const info_b = b.ptrInfo().data; @@ -783,7 +858,7 @@ pub const Type = extern union { .optional_single_const_pointer, .optional_single_mut_pointer, => { - if (b.zigTypeTag() != .Optional) return false; + if (b.zigTypeTag(mod) != .Optional) return false; var buf_a: Payload.ElemType = undefined; var buf_b: Payload.ElemType = undefined; @@ -791,7 +866,7 @@ pub const Type = extern union { }, .anyerror_void_error_union, .error_union => { - if (b.zigTypeTag() != .ErrorUnion) return false; + if (b.zigTypeTag(mod) != .ErrorUnion) return false; const a_set = a.errorUnionSet(); const b_set = b.errorUnionSet(); @@ -805,8 +880,8 @@ pub const Type = extern union { }, .anyframe_T => { - if (b.zigTypeTag() != .AnyFrame) return false; - return a.elemType2().eql(b.elemType2(), mod); + if (b.zigTypeTag(mod) != .AnyFrame) return false; + return a.elemType2(mod).eql(b.elemType2(mod), mod); }, .empty_struct => { @@ -941,6 +1016,9 @@ pub const Type = extern union { } pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { + if (ty.ip_index != .none) { + return mod.intern_pool.indexToKey(ty.ip_index).hashWithHasher(hasher); + } switch (ty.tag()) { .generic_poison => unreachable, @@ -1007,13 +1085,10 @@ pub const Type = extern union { .i64, .u128, .i128, - .int_signed, - .int_unsigned, => { - // Arbitrary sized integers. The target will not be branched upon, - // because we handled target-dependent cases above. + // Arbitrary sized integers. 
std.hash.autoHash(hasher, std.builtin.TypeId.Int); - const info = ty.intInfo(@as(Target, undefined)); + const info = ty.intInfo(mod); std.hash.autoHash(hasher, info.signedness); std.hash.autoHash(hasher, info.bits); }, @@ -1052,12 +1127,7 @@ pub const Type = extern union { std.hash.autoHash(hasher, opaque_obj); }, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => { + .function => { std.hash.autoHash(hasher, std.builtin.TypeId.Fn); const fn_info = ty.fnInfo(); @@ -1275,9 +1345,15 @@ pub const Type = extern union { }; pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return Type{ .tag_if_small_enough = self.tag_if_small_enough }; - } else switch (self.ptr_otherwise.tag) { + if (self.ip_index != .none) { + return Type{ .ip_index = self.ip_index, .legacy = undefined }; + } + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { + return Type{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, + }; + } else switch (self.legacy.ptr_otherwise.tag) { .u1, .u8, .i8, @@ -1317,10 +1393,6 @@ pub const Type = extern union { .noreturn, .null, .undefined, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -1370,13 +1442,12 @@ pub const Type = extern union { .base = .{ .tag = payload.base.tag }, .data = try payload.data.copy(allocator), }; - return Type{ .ptr_otherwise = &new_payload.base }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, - .int_signed, - .int_unsigned, - => return self.copyPayloadShallow(allocator, Payload.Bits), - .vector => { const payload = self.castTag(.vector).?.data; return Tag.vector.create(allocator, .{ @@ -1511,7 +1582,10 @@ pub const Type = extern union { const payload = self.cast(T).?; const new_payload = try allocator.create(T); new_payload.* = payload.*; - return Type{ .ptr_otherwise = &new_payload.base }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; } pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { @@ -1550,7 +1624,7 @@ pub const Type = extern union { } /// This is a debug function. In order to print types in a meaningful way - /// we also need access to the target. + /// we also need access to the module. pub fn dump( start_type: Type, comptime unused_format_string: []const u8, @@ -1559,10 +1633,13 @@ pub const Type = extern union { ) @TypeOf(writer).Error!void { _ = options; comptime assert(unused_format_string.len == 0); + if (start_type.ip_index != .none) { + return writer.print("(intern index: {d})", .{@enumToInt(start_type.ip_index)}); + } if (true) { - // This is disabled to work around a bug where this function - // recursively causes more generic function instantiations - // resulting in an infinite loop in the compiler. + // This is disabled to work around a stage2 bug where this function recursively + // causes more generic function instantiations resulting in an infinite loop + // in the compiler. 
try writer.writeAll("[TODO fix internal compiler bug regarding dump]"); return; } @@ -1656,10 +1733,6 @@ pub const Type = extern union { .anyerror_void_error_union => return writer.writeAll("anyerror!void"), .const_slice_u8 => return writer.writeAll("[]const u8"), .const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"), - .fn_noreturn_no_args => return writer.writeAll("fn() noreturn"), - .fn_void_no_args => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args => return writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"), .manyptr_u8 => return writer.writeAll("[*]u8"), .manyptr_const_u8 => return writer.writeAll("[*]const u8"), @@ -1820,14 +1893,6 @@ pub const Type = extern union { ty = pointee_type; continue; }, - .int_signed => { - const bits = ty.castTag(.int_signed).?.data; - return writer.print("i{d}", .{bits}); - }, - .int_unsigned => { - const bits = ty.castTag(.int_unsigned).?.data; - return writer.print("u{d}", .{bits}); - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); @@ -1938,6 +2003,26 @@ pub const Type = extern union { /// Prints a name suitable for `@typeName`. pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => |s| return writer.writeAll(@tagName(s)), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, + }; const t = ty.tag(); switch (t) { .inferred_alloc_const => unreachable, @@ -2041,10 +2126,6 @@ pub const Type = extern union { .anyerror_void_error_union => try writer.writeAll("anyerror!void"), .const_slice_u8 => try writer.writeAll("[]const u8"), .const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"), - .fn_noreturn_no_args => try writer.writeAll("fn() noreturn"), - .fn_void_no_args => try writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args => try writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args => try writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"), .manyptr_u8 => try writer.writeAll("[*]u8"), .manyptr_const_u8 => try writer.writeAll("[*]const u8"), @@ -2200,7 +2281,7 @@ pub const Type = extern union { if (info.@"align" != 0) { try writer.print("align({d}", .{info.@"align"}); } else { - const alignment = info.pointee_type.abiAlignment(mod.getTarget()); + const alignment = info.pointee_type.abiAlignment(mod); try writer.print("align({d}", .{alignment}); } @@ -2224,14 +2305,6 @@ pub const Type = extern union { try print(info.pointee_type, writer, mod); }, - .int_signed => { - const bits = ty.castTag(.int_signed).?.data; - return writer.print("i{d}", .{bits}); - }, - .int_unsigned => { - const bits = ty.castTag(.int_unsigned).?.data; - return writer.print("u{d}", .{bits}); - }, .optional => { const child_type = ty.castTag(.optional).?.data; 
try writer.writeByte('?'); @@ -2317,10 +2390,6 @@ pub const Type = extern union { .noreturn => return Value.initTag(.noreturn_type), .null => return Value.initTag(.null_type), .undefined => return Value.initTag(.undefined_type), - .fn_noreturn_no_args => return Value.initTag(.fn_noreturn_no_args_type), - .fn_void_no_args => return Value.initTag(.fn_void_no_args_type), - .fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type), - .fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type), .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), .const_slice_u8 => return Value.initTag(.const_slice_u8_type), .const_slice_u8_sentinel_0 => return Value.initTag(.const_slice_u8_sentinel_0_type), @@ -2360,9 +2429,24 @@ pub const Type = extern union { /// may return false positives. pub fn hasRuntimeBitsAdvanced( ty: Type, + mod: *const Module, ignore_comptime_only: bool, strat: AbiAlignmentAdvancedStrat, ) RuntimeBitsError!bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits != 0, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; switch (ty.tag()) { .u1, .u8, @@ -2440,12 +2524,12 @@ pub const Type = extern union { => { if (ignore_comptime_only) { return true; - } else if (ty.childType().zigTypeTag() == .Fn) { + } else if (ty.childType().zigTypeTag(mod) == .Fn) { return !ty.childType().fnInfo().is_generic; } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(ty)); } else { - return !comptimeOnly(ty); + return !comptimeOnly(ty, mod); } }, @@ -2465,10 +2549,6 @@ pub const Type = extern union { // Special exceptions have to be made when emitting functions due to // this returning false. 
.function, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, => return false, .optional => { @@ -2483,7 +2563,7 @@ pub const Type = extern union { } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(child_ty)); } else { - return !comptimeOnly(child_ty); + return !comptimeOnly(child_ty, mod); } }, @@ -2502,7 +2582,7 @@ pub const Type = extern union { } for (struct_obj.fields.values()) |field| { if (field.is_comptime) continue; - if (try field.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2511,16 +2591,15 @@ pub const Type = extern union { .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; - return enum_full.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat); + return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); }, .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.fields.count() >= 2; }, .enum_numbered, .enum_nonexhaustive => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat); + const int_tag_ty = ty.intTagType(); + return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); }, .@"union" => { @@ -2537,7 +2616,7 @@ pub const Type = extern union { .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, } for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2545,7 +2624,7 @@ pub const Type = extern union { }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) { + if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) { return true; } @@ -2555,7 +2634,7 @@ pub const Type = extern union { .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, } for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2563,18 +2642,16 @@ pub const Type = extern union { }, .array, .vector => return ty.arrayLen() != 0 and - try ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat), + try ty.elemType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .array_u8 => return ty.arrayLen() != 0, - .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat), - - .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0, + .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) 
|field_ty, i| { const val = tuple.values[i]; if (val.tag() != .unreachable_value) continue; // comptime field - if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true; + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } return false; }, @@ -2588,7 +2665,21 @@ pub const Type = extern union { /// true if and only if the type has a well-defined memory layout /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout - pub fn hasWellDefinedLayout(ty: Type) bool { + pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return true, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; return switch (ty.tag()) { .u1, .u8, @@ -2626,8 +2717,6 @@ pub const Type = extern union { .manyptr_const_u8_sentinel_0, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .pointer, .single_const_pointer, .single_mut_pointer, @@ -2670,10 +2759,6 @@ pub const Type = extern union { .enum_literal, .type_info, // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -2698,25 +2783,25 @@ pub const Type = extern union { .array, .array_sentinel, - => ty.childType().hasWellDefinedLayout(), + => ty.childType().hasWellDefinedLayout(mod), - .optional => ty.isPtrLikeOptional(), + .optional => ty.isPtrLikeOptional(mod), .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, .union_tagged => false, }; } - pub fn hasRuntimeBits(ty: Type) bool { - return hasRuntimeBitsAdvanced(ty, false, .eager) catch unreachable; + pub fn hasRuntimeBits(ty: Type, mod: *const Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; } - pub fn hasRuntimeBitsIgnoreComptime(ty: Type) bool { - return hasRuntimeBitsAdvanced(ty, true, .eager) catch unreachable; + pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; } - pub fn isFnOrHasRuntimeBits(ty: Type) bool { - switch (ty.zigTypeTag()) { + pub fn isFnOrHasRuntimeBits(ty: Type, mod: *const Module) bool { + switch (ty.zigTypeTag(mod)) { .Fn => { const fn_info = ty.fnInfo(); if (fn_info.is_generic) return false; @@ -2727,18 +2812,18 @@ pub const Type = extern union { .Inline => return false, else => {}, } - if (fn_info.return_type.comptimeOnly()) return false; + if (fn_info.return_type.comptimeOnly(mod)) return false; return true; }, - else => return ty.hasRuntimeBits(), + else => return ty.hasRuntimeBits(mod), } } /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. 
- pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Fn => true, - else => return ty.hasRuntimeBitsIgnoreComptime(), + else => return ty.hasRuntimeBitsIgnoreComptime(mod), }; } @@ -2761,11 +2846,11 @@ pub const Type = extern union { } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. - pub fn ptrAlignment(ty: Type, target: Target) u32 { - return ptrAlignmentAdvanced(ty, target, null) catch unreachable; + pub fn ptrAlignment(ty: Type, mod: *const Module) u32 { + return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; } - pub fn ptrAlignmentAdvanced(ty: Type, target: Target, opt_sema: ?*Sema) !u32 { + pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 { switch (ty.tag()) { .single_const_pointer, .single_mut_pointer, @@ -2780,10 +2865,10 @@ pub const Type = extern union { => { const child_type = ty.cast(Payload.ElemType).?.data; if (opt_sema) |sema| { - const res = try child_type.abiAlignmentAdvanced(target, .{ .sema = sema }); + const res = try child_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; } - return (child_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + return (child_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; }, .manyptr_u8, @@ -2798,13 +2883,13 @@ pub const Type = extern union { if (ptr_info.@"align" != 0) { return ptr_info.@"align"; } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(target, .{ .sema = sema }); + const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } }, - .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(target, opt_sema), + .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), else => unreachable, } @@ -2843,13 +2928,13 @@ pub const Type = extern union { } /// Returns 0 for 0-bit types. - pub fn abiAlignment(ty: Type, target: Target) u32 { - return (ty.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + pub fn abiAlignment(ty: Type, mod: *const Module) u32 { + return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } /// May capture a reference to `ty`. - pub fn lazyAbiAlignment(ty: Type, target: Target, arena: Allocator) !Value { - switch (try ty.abiAlignmentAdvanced(target, .{ .lazy = arena })) { + pub fn lazyAbiAlignment(ty: Type, mod: *const Module, arena: Allocator) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, .scalar => |x| return Value.Tag.int_u64.create(arena, x), } @@ -2874,9 +2959,29 @@ pub const Type = extern union { /// necessary, possibly returning a CompileError. 
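abiAlignmentAdvanced below (and abiSizeAdvanced further down) return either a resolved scalar or a deferred Value, chosen by the strategy argument. A rough sketch of that eager/lazy shape; Result and Strat here are simplified stand-ins, while the real strategy also carries a *Sema and an arena for the lazy value:

    const std = @import("std");

    const Result = union(enum) { scalar: u32, lazy: void };
    const Strat = enum { eager, lazy };

    fn abiAlignmentSketch(layout_resolved: bool, strat: Strat) Result {
        if (!layout_resolved) switch (strat) {
            // The real code creates a Value.Tag.lazy_align value here,
            // to be resolved once the layout is known.
            .lazy => return .{ .lazy = {} },
            // Eager callers are expected to have resolved layout already.
            .eager => unreachable,
        };
        return .{ .scalar = 8 };
    }

    test "lazy strategy defers unresolved layouts" {
        try std.testing.expect(abiAlignmentSketch(false, .lazy) == .lazy);
        try std.testing.expect(abiAlignmentSketch(true, .eager).scalar == 8);
    }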
pub fn abiAlignmentAdvanced( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; + return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + const opt_sema = switch (strat) { .sema => |sema| sema, else => null, @@ -2902,12 +3007,6 @@ pub const Type = extern union { .anyopaque, => return AbiAlignmentAdvanced{ .scalar = 1 }, - .fn_noreturn_no_args, // represents machine code; not a pointer - .fn_void_no_args, // represents machine code; not a pointer - .fn_naked_noreturn_no_args, // represents machine code; not a pointer - .fn_ccc_void_no_args, // represents machine code; not a pointer - => return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }, - // represents machine code; not a pointer .function => { const alignment = ty.castTag(.function).?.data.alignment; @@ -2958,12 +3057,11 @@ pub const Type = extern union { .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, else => { - var payload: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = 80, + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, }; - const u80_ty = initPayload(&payload.base); - return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) }; + return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; }, }, .f128 => switch (target.c_type_bit_size(.longdouble)) { @@ -2980,11 +3078,11 @@ pub const Type = extern union { .error_set_merged, => return AbiAlignmentAdvanced{ .scalar = 2 }, - .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(target, strat), + .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(mod, strat), .vector => { const len = ty.arrayLen(); - const bits = try bitSizeAdvanced(ty.elemType(), target, opt_sema); + const bits = try bitSizeAdvanced(ty.elemType(), mod, opt_sema); const bytes = ((bits * len) + 7) / 8; const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes); return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; @@ -2996,34 +3094,28 @@ pub const Type = extern union { .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) }, .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) }, - .int_signed, .int_unsigned => { - const bits: u16 = ty.cast(Payload.Bits).?.data; - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; - return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(bits, target) }; - }, - .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); - switch (child_type.zigTypeTag()) { + switch (child_type.zigTypeTag(mod)) { .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat), + 
.ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
.NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 },
else => {},
}
switch (strat) {
.eager, .sema => {
- if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
else => |e| return e,
})) {
return AbiAlignmentAdvanced{ .scalar = 1 };
}
- return child_type.abiAlignmentAdvanced(target, strat);
+ return child_type.abiAlignmentAdvanced(mod, strat);
},
- .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(target, strat)) {
+ .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) {
.scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) },
.val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) },
},
@@ -3034,10 +3126,10 @@
// This code needs to be kept in sync with the equivalent switch prong
// in abiSizeAdvanced.
const data = ty.castTag(.error_union).?.data;
- const code_align = abiAlignment(Type.anyerror, target);
+ const code_align = abiAlignment(Type.anyerror, mod);
switch (strat) {
.eager, .sema => {
- if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
else => |e| return e,
})) {
@@ -3045,11 +3137,11 @@
}
return AbiAlignmentAdvanced{ .scalar = @max(
code_align,
- (try data.payload.abiAlignmentAdvanced(target, strat)).scalar,
+ (try data.payload.abiAlignmentAdvanced(mod, strat)).scalar,
) };
},
.lazy => |arena| {
- switch (try data.payload.abiAlignmentAdvanced(target, strat)) {
+ switch (try data.payload.abiAlignmentAdvanced(mod, strat)) {
.scalar => |payload_align| {
return AbiAlignmentAdvanced{
.scalar = @max(code_align, payload_align),
@@ -3089,20 +3181,20 @@
.eager => {},
}
assert(struct_obj.haveLayout());
- return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) };
+ return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) };
}
const fields = ty.structFields();
var big_align: u32 = 0;
for (fields.values()) |field| {
- if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) },
else => |e| return e,
})) continue;
const field_align = if (field.abi_align != 0)
field.abi_align
- else switch (try field.ty.abiAlignmentAdvanced(target, strat)) {
+ else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) {
.scalar => |a| a,
.val => switch (strat) {
.eager => unreachable, // struct layout not resolved
@@ -3114,7 +3206,7 @@
// This logic is duplicated in Module.Struct.Field.alignment.
if (struct_obj.layout == .Extern or target.ofmt == .c) {
- if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
+ if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
// The C ABI requires 128-bit integer fields of structs
// to be 16-byte aligned.
big_align = @max(big_align, 16); @@ -3130,9 +3222,9 @@ pub const Type = extern union { for (tuple.types, 0..) |field_ty, i| { const val = tuple.values[i]; if (val.tag() != .unreachable_value) continue; // comptime field - if (!(field_ty.hasRuntimeBits())) continue; + if (!(field_ty.hasRuntimeBits(mod))) continue; - switch (try field_ty.abiAlignmentAdvanced(target, strat)) { + switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |field_align| big_align = @max(big_align, field_align), .val => switch (strat) { .eager => unreachable, // field type alignment not resolved @@ -3145,17 +3237,16 @@ pub const Type = extern union { }, .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(target) }; + const int_tag_ty = ty.intTagType(); + return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; }, .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, false); + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, true); + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true); }, .empty_struct, @@ -3181,7 +3272,7 @@ pub const Type = extern union { pub fn abiAlignmentAdvancedUnion( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -3195,6 +3286,7 @@ pub const Type = extern union { // We'll guess "pointer-aligned", if the union has an // underaligned pointer field then some allocations // might require explicit alignment. + const target = mod.getTarget(); return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; } _ = try sema.resolveTypeFields(ty); @@ -3206,23 +3298,23 @@ pub const Type = extern union { }; if (union_obj.fields.count() == 0) { if (have_tag) { - return abiAlignmentAdvanced(union_obj.tag_ty, target, strat); + return abiAlignmentAdvanced(union_obj.tag_ty, mod, strat); } else { return AbiAlignmentAdvanced{ .scalar = @boolToInt(union_obj.layout == .Extern) }; } } var max_align: u32 = 0; - if (have_tag) max_align = union_obj.tag_ty.abiAlignment(target); + if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod); for (union_obj.fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) continue; const field_align = if (field.abi_align != 0) field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(target, strat)) { + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { .eager => unreachable, // struct layout not resolved @@ -3236,8 +3328,8 @@ pub const Type = extern union { } /// May capture a reference to `ty`. 
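The loop that ends above reduces union alignment to a running maximum over the tag type (when present) and every field with runtime bits. Stripped of the lazy/sema plumbing, the arithmetic is just this; a toy function over already-computed alignments:

    const std = @import("std");

    fn unionAbiAlignment(tag_align: ?u32, field_aligns: []const u32) u32 {
        var max_align: u32 = tag_align orelse 0;
        for (field_aligns) |field_align| {
            max_align = @max(max_align, field_align);
        }
        return max_align;
    }

    test "tagged union of u8 and u64 on a typical 64-bit target" {
        try std.testing.expectEqual(@as(u32, 8), unionAbiAlignment(1, &.{ 1, 8 }));
    }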
- pub fn lazyAbiSize(ty: Type, target: Target, arena: Allocator) !Value { - switch (try ty.abiSizeAdvanced(target, .{ .lazy = arena })) { + pub fn lazyAbiSize(ty: Type, mod: *const Module, arena: Allocator) !Value { + switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, .scalar => |x| return Value.Tag.int_u64.create(arena, x), } @@ -3245,8 +3337,8 @@ pub const Type = extern union { /// Asserts the type has the ABI size already resolved. /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, target: Target) u64 { - return (abiSizeAdvanced(ty, target, .eager) catch unreachable).scalar; + pub fn abiSize(ty: Type, mod: *const Module) u64 { + return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; } const AbiSizeAdvanced = union(enum) { @@ -3262,14 +3354,30 @@ pub const Type = extern union { /// necessary, possibly returning a CompileError. pub fn abiSizeAdvanced( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiSizeAdvanced { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + switch (ty.tag()) { - .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_void_no_args => unreachable, // represents machine code; not a pointer - .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer .function => unreachable, // represents machine code; not a pointer .@"opaque" => unreachable, // no size available .noreturn => unreachable, @@ -3308,7 +3416,7 @@ pub const Type = extern union { .eager => {}, } assert(struct_obj.haveLayout()); - return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) }; + return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; }, else => { switch (strat) { @@ -3327,22 +3435,21 @@ pub const Type = extern union { if (field_count == 0) { return AbiSizeAdvanced{ .scalar = 0 }; } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, target) }; + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(target) }; + const int_tag_ty = ty.intTagType(); + return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; }, .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return abiSizeAdvancedUnion(ty, target, strat, union_obj, false); + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return abiSizeAdvancedUnion(ty, target, strat, union_obj, true); + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); 
}, .u1, @@ -3361,7 +3468,7 @@ pub const Type = extern union { .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, .array => { const payload = ty.castTag(.array).?.data; - switch (try payload.elem_type.abiSizeAdvanced(target, strat)) { + switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size }, .val => switch (strat) { .sema => unreachable, @@ -3372,7 +3479,7 @@ pub const Type = extern union { }, .array_sentinel => { const payload = ty.castTag(.array_sentinel).?.data; - switch (try payload.elem_type.abiSizeAdvanced(target, strat)) { + switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size }, .val => switch (strat) { .sema => unreachable, @@ -3391,10 +3498,10 @@ pub const Type = extern union { .val = try Value.Tag.lazy_size.create(arena, ty), }, }; - const elem_bits = try payload.elem_type.bitSizeAdvanced(target, opt_sema); + const elem_bits = try payload.elem_type.bitSizeAdvanced(mod, opt_sema); const total_bits = elem_bits * payload.len; const total_bytes = (total_bits + 7) / 8; - const alignment = switch (try ty.abiAlignmentAdvanced(target, strat)) { + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| x, .val => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty), @@ -3450,12 +3557,11 @@ pub const Type = extern union { .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, else => { - var payload: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = 80, + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, }; - const u80_ty = initPayload(&payload.base); - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; }, }, @@ -3473,11 +3579,6 @@ pub const Type = extern union { .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) }, .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) }, .u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) }, - .int_signed, .int_unsigned => { - const bits: u16 = ty.cast(Payload.Bits).?.data; - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target) }; - }, .optional => { var buf: Payload.ElemType = undefined; @@ -3487,16 +3588,16 @@ pub const Type = extern union { return AbiSizeAdvanced{ .scalar = 0 }; } - if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, else => |e| return e, })) return AbiSizeAdvanced{ .scalar = 1 }; - if (ty.optionalReprIsPayload()) { - return abiSizeAdvanced(child_type, target, strat); + if (ty.optionalReprIsPayload(mod)) { + return abiSizeAdvanced(child_type, mod, strat); } - const payload_size = switch (try child_type.abiSizeAdvanced(target, strat)) { + const payload_size = switch (try child_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| elem_size, .val => switch (strat) { .sema => unreachable, @@ -3510,7 +3611,7 @@ pub const Type = extern union { // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal // to the child 
type's ABI alignment.
return AbiSizeAdvanced{
- .scalar = child_type.abiAlignment(target) + payload_size,
+ .scalar = child_type.abiAlignment(mod) + payload_size,
};
},
.error_union => {
// This code needs to be kept in sync with the equivalent switch prong
// in abiAlignmentAdvanced.
const data = ty.castTag(.error_union).?.data;
- const code_size = abiSize(Type.anyerror, target);
- if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ const code_size = abiSize(Type.anyerror, mod);
+ if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) },
else => |e| return e,
})) {
// Same as anyerror.
return AbiSizeAdvanced{ .scalar = code_size };
}
- const code_align = abiAlignment(Type.anyerror, target);
- const payload_align = abiAlignment(data.payload, target);
- const payload_size = switch (try data.payload.abiSizeAdvanced(target, strat)) {
+ const code_align = abiAlignment(Type.anyerror, mod);
+ const payload_align = abiAlignment(data.payload, mod);
+ const payload_size = switch (try data.payload.abiSizeAdvanced(mod, strat)) {
.scalar => |elem_size| elem_size,
.val => switch (strat) {
.sema => unreachable,
@@ -3556,7 +3657,7 @@
pub fn abiSizeAdvancedUnion(
ty: Type,
- target: Target,
+ mod: *const Module,
strat: AbiAlignmentAdvancedStrat,
union_obj: *Module.Union,
have_tag: bool,
@@ -3570,7 +3671,7 @@
},
.eager => {},
}
- return AbiSizeAdvanced{ .scalar = union_obj.abiSize(target, have_tag) };
+ return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) };
}
fn intAbiSize(bits: u16, target: Target) u64 {
@@ -3585,8 +3686,8 @@
);
}
- pub fn bitSize(ty: Type, target: Target) u64 {
- return bitSizeAdvanced(ty, target, null) catch unreachable;
+ pub fn bitSize(ty: Type, mod: *const Module) u64 {
+ return bitSizeAdvanced(ty, mod, null) catch unreachable;
}
/// If you pass `opt_sema`, any recursive type resolutions will happen if
/// necessary, possibly returning a CompileError. Passing `null` instead asserts
/// the type is fully resolved, and there will be no error, guaranteed.
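One detail worth pulling out of the array prong of bitSizeAdvanced below: all elements except the last occupy a full ABI-sized stride, and only the last contributes its exact bit size. A toy version of that formula, where elem_abi_size stands for the max(abiAlignment, abiSize) computed in the real code:

    const std = @import("std");

    fn arrayBitSize(len: u64, elem_abi_size: u64, elem_bit_size: u64) u64 {
        if (elem_abi_size == 0 or len == 0) return 0;
        return (len - 1) * 8 * elem_abi_size + elem_bit_size;
    }

    test "hypothetical [4]u24, assuming ABI size 4 and bit size 24" {
        // Three full 4-byte strides plus one 24-bit tail: 3 * 32 + 24.
        try std.testing.expectEqual(@as(u64, 120), arrayBitSize(4, 4, 24));
    }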
pub fn bitSizeAdvanced( ty: Type, - target: Target, + mod: *const Module, opt_sema: ?*Sema, ) Module.CompileError!u64 { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + switch (ty.tag()) { - .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_void_no_args => unreachable, // represents machine code; not a pointer - .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer .function => unreachable, // represents machine code; not a pointer .anyopaque => unreachable, .type => unreachable, @@ -3633,68 +3748,67 @@ pub const Type = extern union { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; if (struct_obj.layout != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); assert(struct_obj.haveLayout()); - return try struct_obj.backing_int_ty.bitSizeAdvanced(target, opt_sema); + return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); }, .tuple, .anon_struct => { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } var total: u64 = 0; for (ty.tupleFields().types) |field_ty| { - total += try bitSizeAdvanced(field_ty, target, opt_sema); + total += try bitSizeAdvanced(field_ty, mod, opt_sema); } return total; }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return try bitSizeAdvanced(int_tag_ty, target, opt_sema); + const int_tag_ty = ty.intTagType(); + return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); }, .@"union", .union_safety_tagged, .union_tagged => { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } const union_obj = ty.cast(Payload.Union).?.data; assert(union_obj.haveFieldTypes()); var size: u64 = 0; for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, target, opt_sema)); + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); } return size; }, .vector => { const payload = ty.castTag(.vector).?.data; - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return elem_bit_size * payload.len; }, .array_u8 => return 8 * ty.castTag(.array_u8).?.data, .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1), .array => { const payload = ty.castTag(.array).?.data; - const elem_size = 
std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target)); + const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); if (elem_size == 0 or payload.len == 0) return @as(u64, 0); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return (payload.len - 1) * 8 * elem_size + elem_bit_size; }, .array_sentinel => { const payload = ty.castTag(.array_sentinel).?.data; const elem_size = std.math.max( - payload.elem_type.abiAlignment(target), - payload.elem_type.abiSize(target), + payload.elem_type.abiAlignment(mod), + payload.elem_type.abiSize(mod), ); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return payload.len * 8 * elem_size + elem_bit_size; }, @@ -3757,12 +3871,10 @@ pub const Type = extern union { .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type - .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data, - .optional, .error_union => { // Optionals and error unions are not packed so their bitsize // includes padding bits. - return (try abiSizeAdvanced(ty, target, strat)).scalar * 8; + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; }, .atomic_order, @@ -3782,8 +3894,8 @@ pub const Type = extern union { /// Returns true if the type's layout is already resolved and it is safe /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type) bool { - switch (ty.zigTypeTag()) { + pub fn layoutIsResolved(ty: Type, mod: *const Module) bool { + switch (ty.zigTypeTag(mod)) { .Struct => { if (ty.castTag(.@"struct")) |struct_ty| { return struct_ty.data.haveLayout(); @@ -3798,16 +3910,16 @@ pub const Type = extern union { }, .Array => { if (ty.arrayLenIncludingSentinel() == 0) return true; - return ty.childType().layoutIsResolved(); + return ty.childType().layoutIsResolved(mod); }, .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - return payload_ty.layoutIsResolved(); + return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); - return payload_ty.layoutIsResolved(); + return payload_ty.layoutIsResolved(mod); }, else => return true, } @@ -3994,13 +4106,13 @@ pub const Type = extern union { }; } - pub fn isAllowzeroPtr(self: Type) bool { + pub fn isAllowzeroPtr(self: Type, mod: *const Module) bool { return switch (self.tag()) { .pointer => { const payload = self.castTag(.pointer).?.data; return payload.@"allowzero"; }, - else => return self.zigTypeTag() == .Optional, + else => return self.zigTypeTag(mod) == .Optional, }; } @@ -4016,7 +4128,7 @@ pub const Type = extern union { }; } - pub fn isPtrAtRuntime(self: Type) bool { + pub fn isPtrAtRuntime(self: Type, mod: *const Module) bool { switch (self.tag()) { .c_const_pointer, .c_mut_pointer, @@ -4040,7 +4152,7 @@ pub const Type = extern union { .optional => { var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); - if (child_type.zigTypeTag() != .Pointer) return false; + if (child_type.zigTypeTag(mod) != .Pointer) return false; const info = child_type.ptrInfo().data; switch (info.size) { .Slice, .C => return false, @@ -4054,15 +4166,15 @@ pub const Type = extern union { /// For pointer-like optionals, returns true, otherwise returns the 
allowzero property /// of pointers. - pub fn ptrAllowsZero(ty: Type) bool { - if (ty.isPtrLikeOptional()) { + pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { + if (ty.isPtrLikeOptional(mod)) { return true; } return ty.ptrInfo().data.@"allowzero"; } /// See also `isPtrLikeOptional`. - pub fn optionalReprIsPayload(ty: Type) bool { + pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { switch (ty.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -4072,7 +4184,7 @@ pub const Type = extern union { .optional => { const child_ty = ty.castTag(.optional).?.data; - switch (child_ty.zigTypeTag()) { + switch (child_ty.zigTypeTag(mod)) { .Pointer => { const info = child_ty.ptrInfo().data; switch (info.size) { @@ -4093,7 +4205,7 @@ pub const Type = extern union { /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. - pub fn isPtrLikeOptional(self: Type) bool { + pub fn isPtrLikeOptional(self: Type, mod: *const Module) bool { switch (self.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -4103,7 +4215,7 @@ pub const Type = extern union { .optional => { const child_ty = self.castTag(.optional).?.data; - if (child_ty.zigTypeTag() != .Pointer) return false; + if (child_ty.zigTypeTag(mod) != .Pointer) return false; const info = child_ty.ptrInfo().data; switch (info.size) { .Slice, .C => return false, @@ -4166,7 +4278,7 @@ pub const Type = extern union { /// For [N]T, returns T. /// For []T, returns T. /// For anyframe->T, returns T. - pub fn elemType2(ty: Type) Type { + pub fn elemType2(ty: Type, mod: *const Module) Type { return switch (ty.tag()) { .vector => ty.castTag(.vector).?.data.elem_type, .array => ty.castTag(.array).?.data.elem_type, @@ -4181,7 +4293,7 @@ pub const Type = extern union { .single_const_pointer, .single_mut_pointer, - => ty.castPointer().?.data.shallowElemType(), + => ty.castPointer().?.data.shallowElemType(mod), .array_u8, .array_u8_sentinel_0, @@ -4197,7 +4309,7 @@ pub const Type = extern union { const info = ty.castTag(.pointer).?.data; const child_ty = info.pointee_type; if (info.size == .One) { - return child_ty.shallowElemType(); + return child_ty.shallowElemType(mod); } else { return child_ty; } @@ -4213,16 +4325,16 @@ pub const Type = extern union { }; } - fn shallowElemType(child_ty: Type) Type { - return switch (child_ty.zigTypeTag()) { + fn shallowElemType(child_ty: Type, mod: *const Module) Type { + return switch (child_ty.zigTypeTag(mod)) { .Array, .Vector => child_ty.childType(), else => child_ty, }; } /// For vectors, returns the element type. Otherwise returns self. 
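isPtrLikeOptional and optionalReprIsPayload above encode one rule: ?*T can be lowered to a single address, with 0 meaning null, unless the pointer is a slice (a fat pointer), a C pointer (already nullable), or allowzero (where 0 is a legal address). The decision, reduced to a toy predicate:

    const std = @import("std");

    const PtrSize = enum { one, many, slice, c };

    fn ptrLikeOptional(size: PtrSize, allowzero: bool) bool {
        return switch (size) {
            .slice, .c => false,
            .one, .many => !allowzero,
        };
    }

    test "slices, C pointers, and allowzero pointers need a separate null" {
        try std.testing.expect(ptrLikeOptional(.one, false));
        try std.testing.expect(!ptrLikeOptional(.c, false));
        try std.testing.expect(!ptrLikeOptional(.many, true));
    }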
- pub fn scalarType(ty: Type) Type { - return switch (ty.zigTypeTag()) { + pub fn scalarType(ty: Type, mod: *const Module) Type { + return switch (ty.zigTypeTag(mod)) { .Vector => ty.childType(), else => ty, }; @@ -4360,19 +4472,19 @@ pub const Type = extern union { return union_obj.fields.getIndex(name); } - pub fn unionHasAllZeroBitFieldTypes(ty: Type) bool { - return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(); + pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *const Module) bool { + return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod); } - pub fn unionGetLayout(ty: Type, target: Target) Module.Union.Layout { + pub fn unionGetLayout(ty: Type, mod: *const Module) Module.Union.Layout { switch (ty.tag()) { .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return union_obj.getLayout(target, false); + return union_obj.getLayout(mod, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.getLayout(target, true); + return union_obj.getLayout(mod, true); }, else => unreachable, } @@ -4441,8 +4553,8 @@ pub const Type = extern union { }; } - pub fn isError(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isError(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .ErrorUnion, .ErrorSet => true, else => false, }; @@ -4543,14 +4655,21 @@ pub const Type = extern union { } /// Returns true if and only if the type is a fixed-width integer. - pub fn isInt(self: Type) bool { - return self.isSignedInt() or self.isUnsignedInt(); + pub fn isInt(self: Type, mod: *const Module) bool { + return self.isSignedInt(mod) or self.isUnsignedInt(mod); } /// Returns true if and only if the type is a fixed-width, signed integer. - pub fn isSignedInt(self: Type) bool { - return switch (self.tag()) { - .int_signed, + pub fn isSignedInt(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.signedness == .signed, + .simple_type => |s| return switch (s) { + .c_char, .isize, .c_short, .c_int, .c_long, .c_longlong => true, + else => false, + }, + else => return false, + }; + return switch (ty.tag()) { .i8, .isize, .c_char, @@ -4569,9 +4688,16 @@ pub const Type = extern union { } /// Returns true if and only if the type is a fixed-width, unsigned integer. - pub fn isUnsignedInt(self: Type) bool { - return switch (self.tag()) { - .int_unsigned, + pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.signedness == .unsigned, + .simple_type => |s| return switch (s) { + .usize, .c_ushort, .c_uint, .c_ulong, .c_ulonglong => true, + else => false, + }, + else => return false, + }; + return switch (ty.tag()) { .usize, .c_ushort, .c_uint, @@ -4592,8 +4718,8 @@ pub const Type = extern union { /// Returns true for integers, enums, error sets, and packed structs. /// If this function returns true, then intInfo() can be called on the type. - pub fn isAbiInt(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isAbiInt(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Int, .Enum, .ErrorSet => true, .Struct => ty.containerLayout() == .Packed, else => false, @@ -4601,17 +4727,26 @@ pub const Type = extern union { } /// Asserts the type is an integer, enum, error set, or vector of one of them. 
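intInfo below keeps its `while (true)` shape: it peels wrapper types (enum tag types, and in the full code also vectors, error sets, and packed structs) until it reaches a fixed-width integer. The skeleton of that walk, over toy types:

    const std = @import("std");

    const IntType = struct { signedness: std.builtin.Signedness, bits: u16 };

    const Toy = union(enum) {
        int: IntType,
        enum_with_tag: *const Toy,
    };

    fn intInfo(start: *const Toy) IntType {
        var ty = start;
        while (true) switch (ty.*) {
            .int => |info| return info,
            // Keep unwrapping until an integer is reached.
            .enum_with_tag => |tag_ty| ty = tag_ty,
        };
    }

    test "an enum reports its integer tag type's info" {
        const u5_ty = Toy{ .int = .{ .signedness = .unsigned, .bits = 5 } };
        const enum_ty = Toy{ .enum_with_tag = &u5_ty };
        try std.testing.expectEqual(@as(u16, 5), intInfo(&enum_ty).bits);
    }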
- pub fn intInfo(self: Type, target: Target) std.builtin.Type.Int { - var ty = self; + pub fn intInfo(starting_ty: Type, mod: *const Module) InternPool.Key.IntType { + const target = mod.getTarget(); + var ty = starting_ty; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + while (true) switch (ty.tag()) { - .int_unsigned => return .{ - .signedness = .unsigned, - .bits = ty.castTag(.int_unsigned).?.data, - }, - .int_signed => return .{ - .signedness = .signed, - .bits = ty.castTag(.int_signed).?.data, - }, .u1 => return .{ .signedness = .unsigned, .bits = 1 }, .u8 => return .{ .signedness = .unsigned, .bits = 8 }, .i8 => return .{ .signedness = .signed, .bits = 8 }, @@ -4729,32 +4864,14 @@ pub const Type = extern union { /// Asserts the type is a function. pub fn fnParamLen(self: Type) usize { - return switch (self.tag()) { - .fn_noreturn_no_args => 0, - .fn_void_no_args => 0, - .fn_naked_noreturn_no_args => 0, - .fn_ccc_void_no_args => 0, - .function => self.castTag(.function).?.data.param_types.len, - - else => unreachable, - }; + return self.castTag(.function).?.data.param_types.len; } /// Asserts the type is a function. The length of the slice must be at least the length /// given by `fnParamLen`. pub fn fnParamTypes(self: Type, types: []Type) void { - switch (self.tag()) { - .fn_noreturn_no_args => return, - .fn_void_no_args => return, - .fn_naked_noreturn_no_args => return, - .fn_ccc_void_no_args => return, - .function => { - const payload = self.castTag(.function).?.data; - @memcpy(types[0..payload.param_types.len], payload.param_types); - }, - - else => unreachable, - } + const payload = self.castTag(.function).?.data; + @memcpy(types[0..payload.param_types.len], payload.param_types); } /// Asserts the type is a function. @@ -4769,33 +4886,15 @@ pub const Type = extern union { } } - /// Asserts the type is a function. - pub fn fnReturnType(self: Type) Type { - return switch (self.tag()) { - .fn_noreturn_no_args => Type.initTag(.noreturn), - .fn_naked_noreturn_no_args => Type.initTag(.noreturn), - - .fn_void_no_args, - .fn_ccc_void_no_args, - => Type.initTag(.void), - - .function => self.castTag(.function).?.data.return_type, - - else => unreachable, - }; + /// Asserts the type is a function or a function pointer. + pub fn fnReturnType(ty: Type) Type { + const fn_ty = if (ty.castPointer()) |p| p.data else ty; + return fn_ty.castTag(.function).?.data.return_type; } /// Asserts the type is a function. pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention { - return switch (self.tag()) { - .fn_noreturn_no_args => .Unspecified, - .fn_void_no_args => .Unspecified, - .fn_naked_noreturn_no_args => .Naked, - .fn_ccc_void_no_args => .C, - .function => self.castTag(.function).?.data.cc, - - else => unreachable, - }; + return self.castTag(.function).?.data.cc; } /// Asserts the type is a function. 
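With the four fn_*_no_args shortcut tags gone, the accessors in the hunk above reduce to a single `.function` payload lookup, and fnReturnType additionally accepts a pointer to a function, unwrapping it first. The shape of that unwrap, over toy types (castPointer here only mimics the real helper's optional return):

    const std = @import("std");

    const Ty = union(enum) {
        function: struct { return_type: u8 }, // toy payload
        pointer: *const Ty,

        fn castPointer(ty: *const Ty) ?*const Ty {
            return switch (ty.*) {
                .pointer => |elem| elem,
                else => null,
            };
        }

        fn fnReturnType(ty: *const Ty) u8 {
            // Accept a function type or a pointer to one.
            const fn_ty = if (ty.castPointer()) |elem| elem else ty;
            return fn_ty.function.return_type;
        }
    };

    test "works through one level of pointer" {
        const f = Ty{ .function = .{ .return_type = 42 } };
        const p = Ty{ .pointer = &f };
        try std.testing.expectEqual(@as(u8, 42), f.fnReturnType());
        try std.testing.expectEqual(@as(u8, 42), p.fnReturnType());
    }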
@@ -4809,15 +4908,15 @@ pub const Type = extern union { }; } - pub fn isValidParamType(self: Type) bool { - return switch (self.zigTypeTagOrPoison() catch return true) { + pub fn isValidParamType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { .Undefined, .Null, .Opaque, .NoReturn => false, else => true, }; } - pub fn isValidReturnType(self: Type) bool { - return switch (self.zigTypeTagOrPoison() catch return true) { + pub fn isValidReturnType(self: Type, mod: *const Module) bool { + return switch (self.zigTypeTagOrPoison(mod) catch return true) { .Undefined, .Null, .Opaque => false, else => true, }; @@ -4825,87 +4924,43 @@ pub const Type = extern union { /// Asserts the type is a function. pub fn fnIsVarArgs(self: Type) bool { - return switch (self.tag()) { - .fn_noreturn_no_args => false, - .fn_void_no_args => false, - .fn_naked_noreturn_no_args => false, - .fn_ccc_void_no_args => false, - .function => self.castTag(.function).?.data.is_var_args, - - else => unreachable, - }; + return self.castTag(.function).?.data.is_var_args; } pub fn fnInfo(ty: Type) Payload.Function.Data { - return switch (ty.tag()) { - .fn_noreturn_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.noreturn), - .cc = .Unspecified, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_void_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.void), - .cc = .Unspecified, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_naked_noreturn_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.noreturn), - .cc = .Naked, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .fn_ccc_void_no_args => .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = initTag(.void), - .cc = .C, - .alignment = 0, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - .noalias_bits = 0, - }, - .function => ty.castTag(.function).?.data, - - else => unreachable, - }; + return ty.castTag(.function).?.data; } - pub fn isNumeric(self: Type) bool { - return switch (self.tag()) { + pub fn isNumeric(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => true, + .simple_type => |s| return switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + .c_longdouble, + .comptime_int, + .comptime_float, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + => true, + + else => false, + }, + else => false, + }; + return switch (ty.tag()) { .f16, .f32, .f64, @@ -4937,8 +4992,6 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, - .int_unsigned, - .int_signed, => true, else => false, @@ -4947,8 +5000,30 
@@ pub const Type = extern union { /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which /// resolves field types rather than asserting they are already resolved. - pub fn onePossibleValue(starting_type: Type) ?Value { + pub fn onePossibleValue(starting_type: Type, mod: *const Module) ?Value { var ty = starting_type; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return Value.zero; + } else { + return null; + } + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + while (true) switch (ty.tag()) { .f16, .f32, @@ -4988,10 +5063,6 @@ pub const Type = extern union { .error_set_single, .error_set, .error_set_merged, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, .single_const_pointer_to_comptime_int, .array_sentinel, @@ -5047,7 +5118,7 @@ pub const Type = extern union { assert(s.haveFieldTypes()); for (s.fields.values()) |field| { if (field.is_comptime) continue; - if (field.ty.onePossibleValue() != null) continue; + if (field.ty.onePossibleValue(mod) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -5058,7 +5129,7 @@ pub const Type = extern union { for (tuple.values, 0..) |val, i| { const is_comptime = val.tag() != .unreachable_value; if (is_comptime) continue; - if (tuple.types[i].onePossibleValue() != null) continue; + if (tuple.types[i].onePossibleValue(mod) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -5067,7 +5138,7 @@ pub const Type = extern union { .enum_numbered => { const enum_numbered = ty.castTag(.enum_numbered).?.data; // An explicit tag type is always provided for enum_numbered. 
- if (enum_numbered.tag_ty.hasRuntimeBits()) { + if (enum_numbered.tag_ty.hasRuntimeBits(mod)) { return null; } assert(enum_numbered.fields.count() == 1); @@ -5075,7 +5146,7 @@ pub const Type = extern union { }, .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; - if (enum_full.tag_ty.hasRuntimeBits()) { + if (enum_full.tag_ty.hasRuntimeBits(mod)) { return null; } switch (enum_full.fields.count()) { @@ -5098,7 +5169,7 @@ pub const Type = extern union { }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasRuntimeBits()) { + if (!tag_ty.hasRuntimeBits(mod)) { return Value.zero; } else { return null; @@ -5106,10 +5177,10 @@ pub const Type = extern union { }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = union_obj.tag_ty.onePossibleValue() orelse return null; + const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null; if (union_obj.fields.count() == 0) return Value.initTag(.unreachable_value); const only_field = union_obj.fields.values()[0]; - const val_val = only_field.ty.onePossibleValue() orelse return null; + const val_val = only_field.ty.onePossibleValue(mod) orelse return null; _ = tag_val; _ = val_val; return Value.initTag(.empty_struct_value); @@ -5121,17 +5192,10 @@ pub const Type = extern union { .null => return Value.initTag(.null_value), .undefined => return Value.initTag(.undef), - .int_unsigned, .int_signed => { - if (ty.cast(Payload.Bits).?.data == 0) { - return Value.zero; - } else { - return null; - } - }, .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) return Value.initTag(.empty_array); - if (ty.elemType().onePossibleValue() != null) + if (ty.elemType().onePossibleValue(mod) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -5146,7 +5210,22 @@ pub const Type = extern union { /// resolves field types rather than asserting they are already resolved. /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. - pub fn comptimeOnly(ty: Type) bool { + pub fn comptimeOnly(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + return switch (ty.tag()) { .u1, .u8, @@ -5211,8 +5290,6 @@ pub const Type = extern union { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -5223,10 +5300,6 @@ pub const Type = extern union { .enum_literal, .type_info, // These are function bodies, not function pointers. 
- .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -5236,7 +5309,7 @@ pub const Type = extern union { .array, .array_sentinel, .vector, - => return ty.childType().comptimeOnly(), + => return ty.childType().comptimeOnly(mod), .pointer, .single_const_pointer, @@ -5249,10 +5322,10 @@ pub const Type = extern union { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return false; } else { - return child_ty.comptimeOnly(); + return child_ty.comptimeOnly(mod); } }, @@ -5261,14 +5334,14 @@ pub const Type = extern union { .optional_single_const_pointer, => { var buf: Type.Payload.ElemType = undefined; - return ty.optionalChild(&buf).comptimeOnly(); + return ty.optionalChild(&buf).comptimeOnly(mod); }, .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) |field_ty, i| { const have_comptime_val = tuple.values[i].tag() != .unreachable_value; - if (!have_comptime_val and field_ty.comptimeOnly()) return true; + if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; } return false; }, @@ -5301,48 +5374,48 @@ pub const Type = extern union { } }, - .error_union => return ty.errorUnionPayload().comptimeOnly(), + .error_union => return ty.errorUnionPayload().comptimeOnly(mod), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; - return child_ty.comptimeOnly(); + return child_ty.comptimeOnly(mod); }, .enum_numbered => { const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return tag_ty.comptimeOnly(); + return tag_ty.comptimeOnly(mod); }, .enum_full, .enum_nonexhaustive => { const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return tag_ty.comptimeOnly(); + return tag_ty.comptimeOnly(mod); }, }; } - pub fn isArrayOrVector(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, else => false, }; } - pub fn isIndexable(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isIndexable(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize()) { .Slice, .Many, .C => true, - .One => ty.elemType().zigTypeTag() == .Array, + .One => ty.elemType().zigTypeTag(mod) == .Array, }, .Struct => ty.isTuple(), else => false, }; } - pub fn indexableHasLen(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn indexableHasLen(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize()) { .Many, .C => false, .Slice => true, - .One => ty.elemType().zigTypeTag() == .Array, + .One => ty.elemType().zigTypeTag(mod) == .Array, }, .Struct => ty.isTuple(), else => false, @@ -5366,19 +5439,19 @@ pub const Type = extern union { } // Works for vectors and vectors of integers. - pub fn minInt(ty: Type, arena: Allocator, target: Target) !Value { - const scalar = try minIntScalar(ty.scalarType(), arena, target); - if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) { + pub fn minInt(ty: Type, arena: Allocator, mod: *const Module) !Value { + const scalar = try minIntScalar(ty.scalarType(mod), arena, mod); + if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { return scalar; } } - /// Asserts that self.zigTypeTag() == .Int. 
- pub fn minIntScalar(ty: Type, arena: Allocator, target: Target) !Value { - assert(ty.zigTypeTag() == .Int); - const info = ty.intInfo(target); + /// Asserts that self.zigTypeTag(mod) == .Int. + pub fn minIntScalar(ty: Type, arena: Allocator, mod: *const Module) !Value { + assert(ty.zigTypeTag(mod) == .Int); + const info = ty.intInfo(mod); if (info.bits == 0) { return Value.initTag(.the_only_possible_value); @@ -5405,9 +5478,9 @@ pub const Type = extern union { } // Works for vectors and vectors of integers. - pub fn maxInt(ty: Type, arena: Allocator, target: Target) !Value { - const scalar = try maxIntScalar(ty.scalarType(), arena, target); - if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) { + pub fn maxInt(ty: Type, arena: Allocator, mod: *const Module) !Value { + const scalar = try maxIntScalar(ty.scalarType(mod), arena, mod); + if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { return scalar; @@ -5415,9 +5488,9 @@ pub const Type = extern union { } /// Asserts that self.zigTypeTag() == .Int. - pub fn maxIntScalar(self: Type, arena: Allocator, target: Target) !Value { - assert(self.zigTypeTag() == .Int); - const info = self.intInfo(target); + pub fn maxIntScalar(self: Type, arena: Allocator, mod: *const Module) !Value { + assert(self.zigTypeTag(mod) == .Int); + const info = self.intInfo(mod); if (info.bits == 0) { return Value.initTag(.the_only_possible_value); @@ -5452,21 +5525,25 @@ pub const Type = extern union { } /// Asserts the type is an enum or a union. - pub fn intTagType(ty: Type, buffer: *Payload.Bits) Type { + pub fn intTagType(ty: Type) Type { switch (ty.tag()) { .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty, .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const field_count = enum_simple.fields.count(); - const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); - buffer.* = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - return Type.initPayload(&buffer.base); + @panic("TODO move enum_simple to use the intern pool"); + //const enum_simple = ty.castTag(.enum_simple).?.data; + //const field_count = enum_simple.fields.count(); + //const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); + //buffer.* = .{ + // .base = .{ .tag = .int_unsigned }, + // .data = bits, + //}; + //return Type.initPayload(&buffer.base); + }, + .union_tagged => { + @panic("TODO move union_tagged to use the intern pool"); + //return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer), }, - .union_tagged => return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer), else => unreachable, } } @@ -5566,7 +5643,7 @@ pub const Type = extern union { }; const end_val = Value.initPayload(&end_payload.base); if (int_val.compareAll(.gte, end_val, int_ty, m)) return null; - return @intCast(usize, int_val.toUnsignedInt(m.getTarget())); + return @intCast(usize, int_val.toUnsignedInt(m)); } }; switch (ty.tag()) { @@ -5598,11 +5675,7 @@ pub const Type = extern union { const enum_simple = ty.castTag(.enum_simple).?.data; const fields_len = enum_simple.fields.count(); const bits = std.math.log2_int_ceil(usize, fields_len); - var buffer: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - const tag_ty = Type.initPayload(&buffer.base); + const tag_ty = 
mod.intType(.unsigned, bits) catch @panic("TODO: handle OOM here"); return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod); }, .atomic_order, @@ -5675,19 +5748,19 @@ pub const Type = extern union { } } - pub fn structFieldAlign(ty: Type, index: usize, target: Target) u32 { + pub fn structFieldAlign(ty: Type, index: usize, mod: *const Module) u32 { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout != .Packed); - return struct_obj.fields.values()[index].alignment(target, struct_obj.layout); + return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].normalAlignment(target); + return union_obj.fields.values()[index].normalAlignment(mod); }, - .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(target), - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(target), + .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), + .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), else => unreachable, } } @@ -5710,7 +5783,7 @@ pub const Type = extern union { } } - pub fn structFieldValueComptime(ty: Type, index: usize) ?Value { + pub fn structFieldValueComptime(ty: Type, mod: *const Module, index: usize) ?Value { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -5718,14 +5791,14 @@ pub const Type = extern union { if (field.is_comptime) { return field.default_val; } else { - return field.ty.onePossibleValue(); + return field.ty.onePossibleValue(mod); } }, .tuple => { const tuple = ty.castTag(.tuple).?.data; const val = tuple.values[index]; if (val.tag() == .unreachable_value) { - return tuple.types[index].onePossibleValue(); + return tuple.types[index].onePossibleValue(mod); } else { return val; } @@ -5734,7 +5807,7 @@ pub const Type = extern union { const anon_struct = ty.castTag(.anon_struct).?.data; const val = anon_struct.values[index]; if (val.tag() == .unreachable_value) { - return anon_struct.types[index].onePossibleValue(); + return anon_struct.types[index].onePossibleValue(mod); } else { return val; } @@ -5765,7 +5838,7 @@ pub const Type = extern union { } } - pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, target: Target) u32 { + pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *const Module) u32 { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout == .Packed); comptime assert(Type.packed_struct_layout_version == 2); @@ -5774,9 +5847,9 @@ pub const Type = extern union { var elem_size_bits: u16 = undefined; var running_bits: u16 = 0; for (struct_obj.fields.values(), 0..) 
- if (!f.ty.hasRuntimeBits()) continue;
+ if (!f.ty.hasRuntimeBits(mod)) continue;
- const field_bits = @intCast(u16, f.ty.bitSize(target));
+ const field_bits = @intCast(u16, f.ty.bitSize(mod));
 if (i == field_index) {
 bit_offset = running_bits;
 elem_size_bits = field_bits;
@@ -5797,9 +5870,10 @@ pub const Type = extern union {
 offset: u64 = 0,
 big_align: u32 = 0,
 struct_obj: *Module.Struct,
- target: Target,
+ module: *const Module,
 pub fn next(it: *StructOffsetIterator) ?FieldOffset {
+ const mod = it.module;
 var i = it.field;
 if (it.struct_obj.fields.count() <= i)
 return null;
@@ -5811,35 +5885,35 @@ pub const Type = extern union {
 const field = it.struct_obj.fields.values()[i];
 it.field += 1;
- if (field.is_comptime or !field.ty.hasRuntimeBits()) {
+ if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) {
 return FieldOffset{ .field = i, .offset = it.offset };
 }
- const field_align = field.alignment(it.target, it.struct_obj.layout);
+ const field_align = field.alignment(mod, it.struct_obj.layout);
 it.big_align = @max(it.big_align, field_align);
 const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
- it.offset = field_offset + field.ty.abiSize(it.target);
+ it.offset = field_offset + field.ty.abiSize(mod);
 return FieldOffset{ .field = i, .offset = field_offset };
 }
 };
 /// Get an iterator that iterates over all the struct fields, returning the field and
 /// offset of that field. Asserts that the type is a non-packed struct.
- pub fn iterateStructOffsets(ty: Type, target: Target) StructOffsetIterator {
+ pub fn iterateStructOffsets(ty: Type, mod: *const Module) StructOffsetIterator {
 const struct_obj = ty.castTag(.@"struct").?.data;
 assert(struct_obj.haveLayout());
 assert(struct_obj.layout != .Packed);
- return .{ .struct_obj = struct_obj, .target = target };
+ return .{ .struct_obj = struct_obj, .module = mod };
 }
 /// Supports structs and unions.
- pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
+ pub fn structFieldOffset(ty: Type, index: usize, mod: *const Module) u64 {
 switch (ty.tag()) {
 .@"struct" => {
 const struct_obj = ty.castTag(.@"struct").?.data;
 assert(struct_obj.haveLayout());
 assert(struct_obj.layout != .Packed);
- var it = ty.iterateStructOffsets(target);
+ var it = ty.iterateStructOffsets(mod);
 while (it.next()) |field_offset| {
 if (index == field_offset.field)
 return field_offset.offset;
@@ -5856,17 +5930,17 @@ pub const Type = extern union {
 for (tuple.types, 0..) |field_ty, i| {
 const field_val = tuple.values[i];
- if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) {
+ if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) {
 // comptime field
 if (i == index) return offset;
 continue;
 }
- const field_align = field_ty.abiAlignment(target);
+ const field_align = field_ty.abiAlignment(mod);
 big_align = @max(big_align, field_align);
 offset = std.mem.alignForwardGeneric(u64, offset, field_align);
 if (i == index) return offset;
- offset += field_ty.abiSize(target);
+ offset += field_ty.abiSize(mod);
 }
 offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
 return offset;
@@ -5875,7 +5949,7 @@ pub const Type = extern union {
 .@"union" => return 0,
 .union_safety_tagged, .union_tagged => {
 const union_obj = ty.cast(Payload.Union).?.data;
- const layout = union_obj.getLayout(target, true);
+ const layout = union_obj.getLayout(mod, true);
 if (layout.tag_align >= layout.payload_align) {
 // {Tag, Payload}
 return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
@@ -6050,10 +6124,6 @@ pub const Type = extern union {
 manyptr_u8,
 manyptr_const_u8,
 manyptr_const_u8_sentinel_0,
- fn_noreturn_no_args,
- fn_void_no_args,
- fn_naked_noreturn_no_args,
- fn_ccc_void_no_args,
 single_const_pointer_to_comptime_int,
 const_slice_u8,
 const_slice_u8_sentinel_0,
@@ -6087,8 +6157,6 @@ pub const Type = extern union {
 c_mut_pointer,
 const_slice,
 mut_slice,
- int_signed,
- int_unsigned,
 function,
 optional,
 optional_single_mut_pointer,
@@ -6157,10 +6225,6 @@ pub const Type = extern union {
 .enum_literal,
 .null,
 .undefined,
- .fn_noreturn_no_args,
- .fn_void_no_args,
- .fn_naked_noreturn_no_args,
- .fn_ccc_void_no_args,
 .single_const_pointer_to_comptime_int,
 .anyerror_void_error_union,
 .const_slice_u8,
@@ -6204,10 +6268,6 @@ pub const Type = extern union {
 .anyframe_T,
 => Payload.ElemType,
- .int_signed,
- .int_unsigned,
- => Payload.Bits,
-
 .error_set => Payload.ErrorSet,
 .error_set_inferred => Payload.ErrorSetInferred,
 .error_set_merged => Payload.ErrorSetMerged,
@@ -6232,7 +6292,10 @@ pub const Type = extern union {
 pub fn init(comptime t: Tag) file_struct.Type {
 comptime std.debug.assert(@enumToInt(t) < Tag.no_payload_count);
- return .{ .tag_if_small_enough = t };
+ return file_struct.Type{
+ .ip_index = .none,
+ .legacy = .{ .tag_if_small_enough = t },
+ };
 }
 pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type {
@@ -6241,7 +6304,10 @@ pub const Type = extern union {
 .base = .{ .tag = t },
 .data = data,
 };
- return file_struct.Type{ .ptr_otherwise = &p.base };
+ return file_struct.Type{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &p.base },
+ };
 }
 pub fn Data(comptime t: Tag) type {
@@ -6422,10 +6488,9 @@ pub const Type = extern union {
 runtime = std.math.maxInt(u32) - 1,
 _,
 };
-
- pub fn alignment(data: Data, target: Target) u32 {
+ pub fn alignment(data: Data, mod: *const Module) u32 {
 if (data.@"align" != 0) return data.@"align";
- return abiAlignment(data.pointee_type, target);
+ return abiAlignment(data.pointee_type, mod);
 }
 };
 };
@@ -6537,12 +6602,11 @@ pub const Type = extern union {
 pub const @"anyerror" = initTag(.anyerror);
 pub const @"anyopaque" = initTag(.anyopaque);
 pub const @"null" = initTag(.null);
+ pub const @"noreturn" = initTag(.noreturn);
 pub const err_int = Type.u16;
 pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type {
- const target = mod.getTarget();
-
 var d = data;
 if (d.size == .C) {
@@ -6554,8 +6618,8 @@ pub const Type = extern union {
 // pointee type needs to be resolved more, that needs to be done before calling
 // this ptr() function.
 if (d.@"align" != 0) canonicalize: {
- if (!d.pointee_type.layoutIsResolved()) break :canonicalize;
- if (d.@"align" == d.pointee_type.abiAlignment(target)) {
+ if (!d.pointee_type.layoutIsResolved(mod)) break :canonicalize;
+ if (d.@"align" == d.pointee_type.abiAlignment(mod)) {
 d.@"align" = 0;
 }
 }
@@ -6565,7 +6629,7 @@ pub const Type = extern union {
 // needs to be resolved before calling this ptr() function.
 if (d.host_size != 0) {
 assert(d.bit_offset < d.host_size * 8);
- if (d.host_size * 8 == d.pointee_type.bitSize(target)) {
+ if (d.host_size * 8 == d.pointee_type.bitSize(mod)) {
 assert(d.bit_offset == 0);
 d.host_size = 0;
 }
@@ -6676,7 +6740,7 @@ pub const Type = extern union {
 payload: Type,
 mod: *Module,
 ) Allocator.Error!Type {
- assert(error_set.zigTypeTag() == .ErrorSet);
+ assert(error_set.zigTypeTag(mod) == .ErrorSet);
 if (error_set.eql(Type.anyerror, mod) and
 payload.eql(Type.void, mod))
 {
@@ -6696,83 +6760,6 @@ pub const Type = extern union {
 return @intCast(u16, base + @boolToInt(upper < max));
 }
- pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type {
- const bits = smallestUnsignedBits(max);
- return intWithBits(arena, false, bits);
- }
-
- pub fn intWithBits(arena: Allocator, sign: bool, bits: u16) !Type {
- return if (sign) switch (bits) {
- 8 => initTag(.i8),
- 16 => initTag(.i16),
- 32 => initTag(.i32),
- 64 => initTag(.i64),
- else => return Tag.int_signed.create(arena, bits),
- } else switch (bits) {
- 1 => initTag(.u1),
- 8 => initTag(.u8),
- 16 => initTag(.u16),
- 32 => initTag(.u32),
- 64 => initTag(.u64),
- else => return Tag.int_unsigned.create(arena, bits),
- };
- }
-
- /// Given a value representing an integer, returns the number of bits necessary to represent
- /// this value in an integer. If `sign` is true, returns the number of bits necessary in a
- /// twos-complement integer; otherwise in an unsigned integer.
- /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true.
- pub fn intBitsForValue(target: Target, val: Value, sign: bool) u16 {
- assert(!val.isUndef());
- switch (val.tag()) {
- .int_big_positive => {
- const limbs = val.castTag(.int_big_positive).?.data;
- const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true };
- return @intCast(u16, big.bitCountAbs() + @boolToInt(sign));
- },
- .int_big_negative => {
- const limbs = val.castTag(.int_big_negative).?.data;
- // Zero is still a possibility, in which case unsigned is fine
- for (limbs) |limb| {
- if (limb != 0) break;
- } else return 0; // val == 0
- assert(sign);
- const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false };
- return @intCast(u16, big.bitCountTwosComp());
- },
- .int_i64 => {
- const x = val.castTag(.int_i64).?.data;
- if (x >= 0) return smallestUnsignedBits(@intCast(u64, x));
- assert(sign);
- return smallestUnsignedBits(@intCast(u64, -x - 1)) + 1;
- },
- else => {
- const x = val.toUnsignedInt(target);
- return smallestUnsignedBits(x) + @boolToInt(sign);
- },
- }
- }
-
- /// Returns the smallest possible integer type containing both `min` and `max`. Asserts that neither
- /// value is undef.
- /// TODO: if #3806 is implemented, this becomes trivial
- pub fn intFittingRange(target: Target, arena: Allocator, min: Value, max: Value) !Type {
- assert(!min.isUndef());
- assert(!max.isUndef());
-
- if (std.debug.runtime_safety) {
- assert(Value.order(min, max, target).compare(.lte));
- }
-
- const sign = min.orderAgainstZero() == .lt;
-
- const min_val_bits = intBitsForValue(target, min, sign);
- const max_val_bits = intBitsForValue(target, max, sign);
- const bits = @max(min_val_bits, max_val_bits);
-
- return intWithBits(arena, sign, bits);
- }
-
 /// This is only used for comptime asserts. Bump this number when you make a change
 /// to packed struct layout to find out all the places in the codebase you need to edit!
 pub const packed_struct_layout_version = 2;
diff --git a/src/value.zig b/src/value.zig
index af2d7b1ca266..8c824b07205e 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -11,17 +11,24 @@
 const Module = @import("Module.zig");
 const Air = @import("Air.zig");
 const TypedValue = @import("TypedValue.zig");
 const Sema = @import("Sema.zig");
-
-/// This is the raw data, with no bookkeeping, no memory awareness,
-/// no de-duplication, and no type system awareness.
-/// It's important for this type to be small.
-/// This union takes advantage of the fact that the first page of memory
-/// is unmapped, giving us 4096 possible enum tags that have no payload.
-pub const Value = extern union {
- /// If the tag value is less than Tag.no_payload_count, then no pointer
- /// dereference is needed.
- tag_if_small_enough: Tag,
- ptr_otherwise: *Payload,
+const InternPool = @import("InternPool.zig");
+
+pub const Value = struct {
+ /// We are migrating towards using this for every Value object. However, many
+ /// values are still represented the legacy way. This is indicated by using
+ /// InternPool.Index.none.
+ ip_index: InternPool.Index,
+
+ /// This is the raw data, with no bookkeeping, no memory awareness,
+ /// no de-duplication, and no type system awareness.
+ /// This union takes advantage of the fact that the first page of memory
+ /// is unmapped, giving us 4096 possible enum tags that have no payload.
+ legacy: extern union {
+ /// If the tag value is less than Tag.no_payload_count, then no pointer
+ /// dereference is needed.
+ tag_if_small_enough: Tag,
+ ptr_otherwise: *Payload,
+ },
 // Keep in sync with tools/stage2_pretty_printers_common.py
 pub const Tag = enum(usize) {
@@ -81,10 +88,6 @@
 manyptr_u8_type,
 manyptr_const_u8_type,
 manyptr_const_u8_sentinel_0_type,
- fn_noreturn_no_args_type,
- fn_void_no_args_type,
- fn_naked_noreturn_no_args_type,
- fn_ccc_void_no_args_type,
 single_const_pointer_to_comptime_int_type,
 const_slice_u8_type,
 const_slice_u8_sentinel_0_type,
@@ -108,7 +111,6 @@
 // After this, the tag requires a payload.
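// A minimal sketch (hypothetical helper, not part of this patch) of how a
// consumer can distinguish the two representations described above:
//
//     fn isInterned(val: Value) bool {
//         // Interned values carry a real InternPool index; legacy ones use .none.
//         return val.ip_index != .none;
//     }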
 ty,
- int_type,
 int_u64,
 int_i64,
 int_big_positive,
 int_big_negative,
@@ -232,10 +234,6 @@
 .noreturn_type,
 .null_type,
 .undefined_type,
- .fn_noreturn_no_args_type,
- .fn_void_no_args_type,
- .fn_naked_noreturn_no_args_type,
- .fn_ccc_void_no_args_type,
 .single_const_pointer_to_comptime_int_type,
 .anyframe_type,
 .const_slice_u8_type,
@@ -304,7 +302,6 @@
 .lazy_size,
 => Payload.Ty,
- .int_type => Payload.IntType,
 .int_u64 => Payload.U64,
 .int_i64 => Payload.I64,
 .function => Payload.Function,
@@ -332,7 +329,10 @@
 .base = .{ .tag = t },
 .data = data,
 };
- return Value{ .ptr_otherwise = &ptr.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &ptr.base },
+ };
 }
 pub fn Data(comptime t: Tag) type {
@@ -342,37 +342,47 @@
 pub fn initTag(small_tag: Tag) Value {
 assert(@enumToInt(small_tag) < Tag.no_payload_count);
- return .{ .tag_if_small_enough = small_tag };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .tag_if_small_enough = small_tag },
+ };
 }
 pub fn initPayload(payload: *Payload) Value {
 assert(@enumToInt(payload.tag) >= Tag.no_payload_count);
- return .{ .ptr_otherwise = payload };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = payload },
+ };
 }
 pub fn tag(self: Value) Tag {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
- return self.tag_if_small_enough;
+ assert(self.ip_index == .none);
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
+ return self.legacy.tag_if_small_enough;
 } else {
- return self.ptr_otherwise.tag;
+ return self.legacy.ptr_otherwise.tag;
 }
 }
 /// Prefer `castTag` to this.
 pub fn cast(self: Value, comptime T: type) ?*T {
+ if (self.ip_index != .none) {
+ return null;
+ }
 if (@hasField(T, "base_tag")) {
 return self.castTag(T.base_tag);
 }
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
 return null;
 }
 inline for (@typeInfo(Tag).Enum.fields) |field| {
 if (field.value < Tag.no_payload_count) continue;
 const t = @intToEnum(Tag, field.value);
- if (self.ptr_otherwise.tag == t) {
+ if (self.legacy.ptr_otherwise.tag == t) {
 if (T == t.Type()) {
- return @fieldParentPtr(T, "base", self.ptr_otherwise);
+ return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise);
 }
 return null;
 }
@@ -381,11 +391,15 @@
 }
 pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count)
+ if (self.ip_index != .none) {
+ return null;
+ }
+
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count)
 return null;
- if (self.ptr_otherwise.tag == t)
- return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise);
+ if (self.legacy.ptr_otherwise.tag == t)
+ return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise);
 return null;
 }
@@ -393,9 +407,15 @@
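// Sketch of the guard pattern cast/castTag now follow (hypothetical caller):
// a payload query on an interned value short-circuits to null, so legacy
// payload code never dereferences an InternPool index.
//
//     if (val.castTag(.int_u64)) |payload| {
//         _ = payload.data; // only reachable when val.ip_index == .none
//     }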
 /// It's intentional that this function is not passed a corresponding Type, so that
 /// a Value can be copied from a Sema to a Decl prior to resolving struct/union field types.
 pub fn copy(self: Value, arena: Allocator) error{OutOfMemory}!Value {
- if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) {
- return Value{ .tag_if_small_enough = self.tag_if_small_enough };
- } else switch (self.ptr_otherwise.tag) {
+ if (self.ip_index != .none) {
+ return Value{ .ip_index = self.ip_index, .legacy = undefined };
+ }
+ if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) {
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
+ };
+ } else switch (self.legacy.ptr_otherwise.tag) {
 .u1_type,
 .u8_type,
 .i8_type,
@@ -435,10 +455,6 @@
 .noreturn_type,
 .null_type,
 .undefined_type,
- .fn_noreturn_no_args_type,
- .fn_void_no_args_type,
- .fn_naked_noreturn_no_args_type,
- .fn_ccc_void_no_args_type,
 .single_const_pointer_to_comptime_int_type,
 .anyframe_type,
 .const_slice_u8_type,
@@ -481,19 +497,24 @@
 .base = payload.base,
 .data = try payload.data.copy(arena),
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
- .int_type => return self.copyPayloadShallow(arena, Payload.IntType),
 .int_u64 => return self.copyPayloadShallow(arena, Payload.U64),
 .int_i64 => return self.copyPayloadShallow(arena, Payload.I64),
 .int_big_positive, .int_big_negative => {
 const old_payload = self.cast(Payload.BigInt).?;
 const new_payload = try arena.create(Payload.BigInt);
 new_payload.* = .{
- .base = .{ .tag = self.ptr_otherwise.tag },
+ .base = .{ .tag = self.legacy.ptr_otherwise.tag },
 .data = try arena.dupe(std.math.big.Limb, old_payload.data),
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .function => return self.copyPayloadShallow(arena, Payload.Function),
 .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
@@ -512,7 +533,10 @@
 .container_ty = try payload.data.container_ty.copy(arena),
 },
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .comptime_field_ptr => {
 const payload = self.cast(Payload.ComptimeFieldPtr).?;
@@ -524,7 +548,10 @@
 .field_ty = try payload.data.field_ty.copy(arena),
 },
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .elem_ptr => {
 const payload = self.castTag(.elem_ptr).?;
@@ -537,7 +564,10 @@
 .index = payload.data.index,
 },
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .field_ptr => {
 const payload = self.castTag(.field_ptr).?;
@@ -550,7 +580,10 @@
 .field_index = payload.data.field_index,
 },
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .bytes => {
 const bytes = self.castTag(.bytes).?.data;
@@ -559,7 +592,10 @@
 .base = .{ .tag = .bytes },
 .data = try arena.dupe(u8, bytes),
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit),
 .repeated,
@@ -574,7 +610,10 @@
 .base = payload.base,
 .data = try payload.data.copy(arena),
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .slice => {
 const payload = self.castTag(.slice).?;
@@ -586,7 +625,10 @@
 .len = try payload.data.len.copy(arena),
 },
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .float_16 => return self.copyPayloadShallow(arena, Payload.Float_16),
 .float_32 => return self.copyPayloadShallow(arena, Payload.Float_32),
@@ -600,7 +642,10 @@
 .base = payload.base,
 .data = try arena.dupe(u8, payload.data),
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .enum_field_index => return self.copyPayloadShallow(arena, Payload.U32),
 .@"error" => return self.copyPayloadShallow(arena, Payload.Error),
@@ -615,7 +660,10 @@
 for (new_payload.data, 0..) |*elem, i| {
 elem.* = try payload.data[i].copy(arena);
 }
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .@"union" => {
@@ -628,7 +676,10 @@
 .val = try tag_and_val.val.copy(arena),
 },
 };
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 },
 .inferred_alloc => unreachable,
@@ -640,7 +691,10 @@
 const payload = self.cast(T).?;
 const new_payload = try arena.create(T);
 new_payload.* = payload.*;
- return Value{ .ptr_otherwise = &new_payload.base };
+ return Value{
+ .ip_index = .none,
+ .legacy = .{ .ptr_otherwise = &new_payload.base },
+ };
 }
 pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
@@ -660,6 +714,10 @@
 out_stream: anytype,
 ) !void {
 comptime assert(fmt.len == 0);
+ if (start_val.ip_index != .none) {
+ try out_stream.print("(interned {d})", .{@enumToInt(start_val.ip_index)});
+ return;
+ }
 var val = start_val;
 while (true) switch (val.tag()) {
 .u1_type => return out_stream.writeAll("u1"),
@@ -701,10 +759,6 @@
 .noreturn_type => return out_stream.writeAll("noreturn"),
 .null_type => return out_stream.writeAll("@Type(.Null)"),
 .undefined_type => return out_stream.writeAll("@Type(.Undefined)"),
- .fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"),
- .fn_void_no_args_type => return out_stream.writeAll("fn() void"),
- .fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
- .fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"),
 .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
 .anyframe_type => return out_stream.writeAll("anyframe"),
 .const_slice_u8_type => return out_stream.writeAll("[]const u8"),
@@ -755,13 +809,6 @@
 try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
 return try out_stream.writeAll(")");
 },
- .int_type => {
- const int_type = val.castTag(.int_type).?.data;
- return out_stream.print("{s}{d}", .{
- if (int_type.signed) "s" else "u",
- int_type.bits,
- });
- },
 .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream),
 .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
 .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
@@ -848,7 +895,6 @@
 /// Asserts that the value is representable as an array of bytes.
 /// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
 pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 {
- const target = mod.getTarget();
 switch (val.tag()) {
 .bytes => {
 const bytes = val.castTag(.bytes).?.data;
@@ -863,7 +909,7 @@
 },
 .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data),
 .repeated => {
- const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(target));
+ const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod));
 const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen()));
 @memset(result, byte);
 return result;
@@ -877,7 +923,7 @@
 .the_only_possible_value => return &[_]u8{},
 .slice => {
 const slice = val.castTag(.slice).?.data;
 return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod);
 },
 else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod),
 }
 }
@@ -888,15 +934,19 @@
 var elem_value_buf: ElemValueBuffer = undefined;
 for (result, 0..) |*elem, i| {
 const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
- elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget()));
+ elem.* = @intCast(u8, elem_val.toUnsignedInt(mod));
 }
 return result;
 }
- pub const ToTypeBuffer = Type.Payload.Bits;
-
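// With ToTypeBuffer removed, a caller update might look like this (sketch;
// `val` is assumed to hold a type):
//
//     // before: var buf: Value.ToTypeBuffer = undefined;
//     //         const ty = val.toType(&buf);
//     // after:
//     const ty = val.toType();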
 /// Asserts that the value is representable as a type.
- pub fn toType(self: Value, buffer: *ToTypeBuffer) Type {
+ pub fn toType(self: Value) Type {
+ if (self.ip_index != .none) {
+ return .{
+ .ip_index = self.ip_index,
+ .legacy = undefined,
+ };
+ }
 return switch (self.tag()) {
 .ty => self.castTag(.ty).?.data,
 .u1_type => Type.initTag(.u1),
@@ -938,10 +988,6 @@
 .noreturn_type => Type.initTag(.noreturn),
 .null_type => Type.initTag(.null),
 .undefined_type => Type.initTag(.undefined),
- .fn_noreturn_no_args_type => Type.initTag(.fn_noreturn_no_args),
- .fn_void_no_args_type => Type.initTag(.fn_void_no_args),
- .fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args),
- .fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args),
 .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int),
 .anyframe_type => Type.initTag(.@"anyframe"),
 .const_slice_u8_type => Type.initTag(.const_slice_u8),
@@ -964,17 +1010,6 @@
 .extern_options_type => Type.initTag(.extern_options),
 .type_info_type => Type.initTag(.type_info),
- .int_type => {
- const payload = self.castTag(.int_type).?.data;
- buffer.* = .{
- .base = .{
- .tag = if (payload.signed) .int_signed else .int_unsigned,
- },
- .data = payload.bits,
- };
- return Type.initPayload(&buffer.base);
- },
-
 else => unreachable,
 };
 }
@@ -1050,7 +1085,7 @@
 }
 pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
- if (ty.zigTypeTag() == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod);
+ if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod);
 const field_index = switch (val.tag()) {
 .enum_field_index => val.castTag(.enum_field_index).?.data,
@@ -1068,10 +1103,9 @@
 };
 if (values.entries.len == 0) {
 // auto-numbered enum
- break :field_index @intCast(u32, val.toUnsignedInt(mod.getTarget()));
+ break :field_index @intCast(u32, val.toUnsignedInt(mod));
 }
- var buffer: Type.Payload.Bits = undefined;
- const int_tag_ty = ty.intTagType(&buffer);
+ const int_tag_ty = ty.intTagType();
 break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?);
 },
 };
@@ -1086,15 +1120,15 @@
 }
 /// Asserts the value is an integer.
- pub fn toBigInt(val: Value, space: *BigIntSpace, target: Target) BigIntConst {
- return val.toBigIntAdvanced(space, target, null) catch unreachable;
+ pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *const Module) BigIntConst {
+ return val.toBigIntAdvanced(space, mod, null) catch unreachable;
 }
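// Sketch of the corresponding caller-side change (assuming `mod: *const
// Module` is in scope where a `target` used to be):
//
//     var space: Value.BigIntSpace = undefined;
//     const big = val.toBigInt(&space, mod); // previously: (&space, target)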
 /// Asserts the value is an integer.
 pub fn toBigIntAdvanced(
 val: Value,
 space: *BigIntSpace,
- target: Target,
+ mod: *const Module,
 opt_sema: ?*Sema,
 ) Module.CompileError!BigIntConst {
 switch (val.tag()) {
@@ -1114,7 +1148,7 @@
 },
 .runtime_value => {
 const sub_val = val.castTag(.runtime_value).?.data;
- return sub_val.toBigIntAdvanced(space, target, opt_sema);
+ return sub_val.toBigIntAdvanced(space, mod, opt_sema);
 },
 .int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(),
 .int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(),
@@ -1128,7 +1162,7 @@
 if (opt_sema) |sema| {
 try sema.resolveTypeLayout(ty);
 }
- const x = ty.abiAlignment(target);
+ const x = ty.abiAlignment(mod);
 return BigIntMutable.init(&space.limbs, x).toConst();
 },
 .lazy_size => {
@@ -1136,14 +1170,14 @@
 if (opt_sema) |sema| {
 try sema.resolveTypeLayout(ty);
 }
- const x = ty.abiSize(target);
+ const x = ty.abiSize(mod);
 return BigIntMutable.init(&space.limbs, x).toConst();
 },
 .elem_ptr => {
 const elem_ptr = val.castTag(.elem_ptr).?.data;
- const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(target, opt_sema)).?;
- const elem_size = elem_ptr.elem_ty.abiSize(target);
+ const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?;
+ const elem_size = elem_ptr.elem_ty.abiSize(mod);
 const new_addr = array_addr + elem_size * elem_ptr.index;
 return BigIntMutable.init(&space.limbs, new_addr).toConst();
 },
@@ -1154,13 +1188,13 @@
 /// If the value fits in a u64, return it, otherwise null.
 /// Asserts not undefined.
- pub fn getUnsignedInt(val: Value, target: Target) ?u64 {
- return getUnsignedIntAdvanced(val, target, null) catch unreachable;
+ pub fn getUnsignedInt(val: Value, mod: *const Module) ?u64 {
+ return getUnsignedIntAdvanced(val, mod, null) catch unreachable;
 }
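// The two call shapes this enables (hypothetical callers): the plain getter
// asserts layouts are already resolved, while the advanced form may resolve
// lazy alignment/size values through a Sema.
//
//     const eager: ?u64 = val.getUnsignedInt(mod);
//     const resolved: ?u64 = try val.getUnsignedIntAdvanced(mod, sema);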
 /// If the value fits in a u64, return it, otherwise null.
 /// Asserts not undefined.
- pub fn getUnsignedIntAdvanced(val: Value, target: Target, opt_sema: ?*Sema) !?u64 {
+ pub fn getUnsignedIntAdvanced(val: Value, mod: *const Module, opt_sema: ?*Sema) !?u64 {
 switch (val.tag()) {
 .zero,
 .bool_false,
@@ -1181,17 +1215,17 @@
 .lazy_align => {
 const ty = val.castTag(.lazy_align).?.data;
 if (opt_sema) |sema| {
- return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar;
+ return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
 } else {
- return ty.abiAlignment(target);
+ return ty.abiAlignment(mod);
 }
 },
 .lazy_size => {
 const ty = val.castTag(.lazy_size).?.data;
 if (opt_sema) |sema| {
- return (try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar;
+ return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
 } else {
- return ty.abiSize(target);
+ return ty.abiSize(mod);
 }
 },
@@ -1200,12 +1234,12 @@
 }
 /// Asserts the value is an integer and it fits in a u64
- pub fn toUnsignedInt(val: Value, target: Target) u64 {
- return getUnsignedInt(val, target).?;
+ pub fn toUnsignedInt(val: Value, mod: *const Module) u64 {
+ return getUnsignedInt(val, mod).?;
 }
 /// Asserts the value is an integer and it fits in a i64
- pub fn toSignedInt(val: Value, target: Target) i64 {
+ pub fn toSignedInt(val: Value, mod: *const Module) i64 {
 switch (val.tag()) {
 .zero,
 .bool_false,
@@ -1223,11 +1257,11 @@
 .lazy_align => {
 const ty = val.castTag(.lazy_align).?.data;
- return @intCast(i64, ty.abiAlignment(target));
+ return @intCast(i64, ty.abiAlignment(mod));
 },
 .lazy_size => {
 const ty = val.castTag(.lazy_size).?.data;
- return @intCast(i64, ty.abiSize(target));
+ return @intCast(i64, ty.abiSize(mod));
 },
 .undef => unreachable,
@@ -1276,17 +1310,17 @@
 const target = mod.getTarget();
 const endian = target.cpu.arch.endian();
 if (val.isUndef()) {
- const size = @intCast(usize, ty.abiSize(target));
+ const size = @intCast(usize, ty.abiSize(mod));
 @memset(buffer[0..size], 0xaa);
 return;
 }
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
 .Void => {},
 .Bool => {
 buffer[0] = @boolToInt(val.toBool());
 },
 .Int, .Enum => {
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
 const bits = int_info.bits;
 const byte_count = (bits + 7) / 8;
@@ -1307,7 +1341,7 @@
 };
 } else {
 var bigint_buffer: BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_buffer, target);
+ const bigint = int_val.toBigInt(&bigint_buffer, mod);
 bigint.writeTwosComplement(buffer[0..byte_count], endian);
 }
 },
@@ -1322,7 +1356,7 @@
 .Array => {
 const len = ty.arrayLen();
 const elem_ty = ty.childType();
- const elem_size = @intCast(usize, elem_ty.abiSize(target));
+ const elem_size = @intCast(usize, elem_ty.abiSize(mod));
 var elem_i: usize = 0;
 var elem_value_buf: ElemValueBuffer = undefined;
 var buf_off: usize = 0;
@@ -1335,7 +1369,7 @@
 .Vector => {
 // We use byte_count instead of abi_size here, so that any padding bytes
 // follow the data bytes, on both big- and little-endian systems.
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
 return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
 },
 .Struct => switch (ty.containerLayout()) {
@@ -1344,12 +1378,12 @@
 const fields = ty.structFields().values();
 const field_vals = val.castTag(.aggregate).?.data;
 for (fields, 0..) |field, i| {
- const off = @intCast(usize, ty.structFieldOffset(i, target));
+ const off = @intCast(usize, ty.structFieldOffset(i, mod));
 try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
 }
 },
 .Packed => {
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
 return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
 },
 },
@@ -1363,7 +1397,7 @@
 .Auto => return error.IllDefinedMemoryLayout,
 .Extern => return error.Unimplemented,
 .Packed => {
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
 return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
 },
 },
@@ -1373,10 +1407,10 @@
 return val.writeToMemory(Type.usize, mod, buffer);
 },
 .Optional => {
- if (!ty.isPtrLikeOptional()) return error.IllDefinedMemoryLayout;
+ if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout;
 var buf: Type.Payload.ElemType = undefined;
 const child = ty.optionalChild(&buf);
- const opt_val = val.optionalValue();
+ const opt_val = val.optionalValue(mod);
 if (opt_val) |some| {
 return some.writeToMemory(child, mod, buffer);
 } else {
@@ -1395,11 +1429,11 @@
 const target = mod.getTarget();
 const endian = target.cpu.arch.endian();
 if (val.isUndef()) {
- const bit_size = @intCast(usize, ty.bitSize(target));
+ const bit_size = @intCast(usize, ty.bitSize(mod));
 std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
 return;
 }
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
 .Void => {},
 .Bool => {
 const byte_index = switch (endian) {
@@ -1413,8 +1447,8 @@
 }
 },
 .Int, .Enum => {
- const bits = ty.intInfo(target).bits;
- const abi_size = @intCast(usize, ty.abiSize(target));
+ const bits = ty.intInfo(mod).bits;
+ const abi_size = @intCast(usize, ty.abiSize(mod));
 var enum_buffer: Payload.U64 = undefined;
 const int_val = val.enumToInt(ty, &enum_buffer);
@@ -1431,7 +1465,7 @@
 std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian);
 } else {
 var bigint_buffer: BigIntSpace = undefined;
- const bigint = int_val.toBigInt(&bigint_buffer, target);
+ const bigint = int_val.toBigInt(&bigint_buffer, mod);
 bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian);
 }
 },
@@ -1445,7 +1479,7 @@
 },
 .Vector => {
 const elem_ty = ty.childType();
- const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+ const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
 const len = @intCast(usize, ty.arrayLen());
 var bits: u16 = 0;
@@ -1467,7 +1501,7 @@
 const fields = ty.structFields().values();
 const field_vals = val.castTag(.aggregate).?.data;
 for (fields, 0..) |field, i| {
- const field_bits = @intCast(u16, field.ty.bitSize(target));
+ const field_bits = @intCast(u16, field.ty.bitSize(mod));
 try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
 bits += field_bits;
 }
@@ -1479,7 +1513,7 @@
 .Packed => {
 const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
 const field_type = ty.unionFields().values()[field_index.?].ty;
- const field_val = val.fieldValue(field_type, field_index.?);
+ const field_val = val.fieldValue(field_type, mod, field_index.?);
 return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
 },
@@ -1490,10 +1524,10 @@
 return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
 },
 .Optional => {
- assert(ty.isPtrLikeOptional());
+ assert(ty.isPtrLikeOptional(mod));
 var buf: Type.Payload.ElemType = undefined;
 const child = ty.optionalChild(&buf);
- const opt_val = val.optionalValue();
+ const opt_val = val.optionalValue(mod);
 if (opt_val) |some| {
 return some.writeToPackedMemory(child, mod, buffer, bit_offset);
 } else {
@@ -1516,7 +1550,7 @@
 ) Allocator.Error!Value {
 const target = mod.getTarget();
 const endian = target.cpu.arch.endian();
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
 .Void => return Value.void,
 .Bool => {
 if (buffer[0] == 0) {
@@ -1526,7 +1560,7 @@
 }
 },
 .Int, .Enum => {
- const int_info = ty.intInfo(target);
+ const int_info = ty.intInfo(mod);
 const bits = int_info.bits;
 const byte_count = (bits + 7) / 8;
 if (bits == 0 or buffer.len == 0) return Value.zero;
@@ -1560,7 +1594,7 @@
 },
 .Array => {
 const elem_ty = ty.childType();
- const elem_size = elem_ty.abiSize(target);
+ const elem_size = elem_ty.abiSize(mod);
 const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
 var offset: usize = 0;
 for (elems) |*elem| {
@@ -1572,7 +1606,7 @@
 .Vector => {
 // We use byte_count instead of abi_size here, so that any padding bytes
 // follow the data bytes, on both big- and little-endian systems.
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
 return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
 },
 .Struct => switch (ty.containerLayout()) {
@@ -1581,14 +1615,14 @@
 const fields = ty.structFields().values();
 const field_vals = try arena.alloc(Value, fields.len);
 for (fields, 0..) |field, i| {
- const off = @intCast(usize, ty.structFieldOffset(i, target));
- const sz = @intCast(usize, ty.structFieldType(i).abiSize(target));
+ const off = @intCast(usize, ty.structFieldOffset(i, mod));
+ const sz = @intCast(usize, ty.structFieldType(i).abiSize(mod));
 field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
 }
 return Tag.aggregate.create(arena, field_vals);
 },
 .Packed => {
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
+ const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
 return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
 },
 },
@@ -1609,7 +1643,7 @@
 return readFromMemory(Type.usize, mod, buffer, arena);
 },
 .Optional => {
- assert(ty.isPtrLikeOptional());
+ assert(ty.isPtrLikeOptional(mod));
 var buf: Type.Payload.ElemType = undefined;
 const child = ty.optionalChild(&buf);
 return readFromMemory(child, mod, buffer, arena);
@@ -1631,7 +1665,7 @@
 ) Allocator.Error!Value {
 const target = mod.getTarget();
 const endian = target.cpu.arch.endian();
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
 .Void => return Value.void,
 .Bool => {
 const byte = switch (endian) {
@@ -1646,8 +1680,8 @@
 },
 .Int, .Enum => {
 if (buffer.len == 0) return Value.zero;
- const int_info = ty.intInfo(target);
- const abi_size = @intCast(usize, ty.abiSize(target));
+ const int_info = ty.intInfo(mod);
+ const abi_size = @intCast(usize, ty.abiSize(mod));
 const bits = int_info.bits;
 if (bits == 0) return Value.zero;
@@ -1677,7 +1711,7 @@
 const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen()));
 var bits: u16 = 0;
- const elem_bit_size = @intCast(u16, elem_ty.bitSize(target));
+ const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod));
 for (elems, 0..) |_, i| {
 // On big-endian systems, LLVM reverses the element order of vectors by default
 const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i;
@@ -1694,7 +1728,7 @@
 const fields = ty.structFields().values();
 const field_vals = try arena.alloc(Value, fields.len);
 for (fields, 0..) |field, i| {
- const field_bits = @intCast(u16, field.ty.bitSize(target));
+ const field_bits = @intCast(u16, field.ty.bitSize(mod));
 field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena);
 bits += field_bits;
 }
@@ -1706,7 +1740,7 @@
 return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
 },
 .Optional => {
- assert(ty.isPtrLikeOptional());
+ assert(ty.isPtrLikeOptional(mod));
 var buf: Type.Payload.ElemType = undefined;
 const child = ty.optionalChild(&buf);
 return readFromPackedMemory(child, mod, buffer, bit_offset, arena);
@@ -1764,8 +1798,8 @@
 }
 }
- pub fn clz(val: Value, ty: Type, target: Target) u64 {
- const ty_bits = ty.intInfo(target).bits;
+ pub fn clz(val: Value, ty: Type, mod: *const Module) u64 {
+ const ty_bits = ty.intInfo(mod).bits;
 switch (val.tag()) {
 .zero, .bool_false => return ty_bits,
 .one, .bool_true => return ty_bits - 1,
@@ -1792,7 +1826,7 @@
 .lazy_align, .lazy_size => {
 var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable;
+ const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable;
 return bigint.clz(ty_bits);
 },
@@ -1800,8 +1834,8 @@
 }
 }
- pub fn ctz(val: Value, ty: Type, target: Target) u64 {
- const ty_bits = ty.intInfo(target).bits;
+ pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 {
+ const ty_bits = ty.intInfo(mod).bits;
 switch (val.tag()) {
 .zero, .bool_false => return ty_bits,
 .one, .bool_true => return 0,
@@ -1828,7 +1862,7 @@
 .lazy_align, .lazy_size => {
 var bigint_buf: BigIntSpace = undefined;
- const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable;
+ const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable;
 return bigint.ctz();
 },
@@ -1836,7 +1870,7 @@
 }
 }
- pub fn popCount(val: Value, ty: Type, target: Target) u64 {
+ pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 {
 assert(!val.isUndef());
 switch (val.tag()) {
 .zero, .bool_false => return 0,
@@ -1845,22 +1879,22 @@
 .int_u64 => return @popCount(val.castTag(.int_u64).?.data),
 else => {
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
 var buffer: Value.BigIntSpace = undefined;
- const int = val.toBigInt(&buffer, target);
+ const int = val.toBigInt(&buffer, mod);
 return @intCast(u64, int.popCount(info.bits));
 },
 }
 }
- pub fn bitReverse(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
+ pub fn bitReverse(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
 assert(!val.isUndef());
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
 var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer, target);
+ const operand_bigint = val.toBigInt(&buffer, mod);
 const limbs = try arena.alloc(
 std.math.big.Limb,
@@ -1872,16 +1906,16 @@
 return fromBigInt(arena, result_bigint.toConst());
 }
- pub fn byteSwap(val: Value, ty: Type, target: Target, arena: Allocator) !Value {
+ pub fn byteSwap(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
 assert(!val.isUndef());
- const info = ty.intInfo(target);
+ const info = ty.intInfo(mod);
 // Bit count must be evenly divisible by 8
 assert(info.bits % 8 == 0);
 var buffer: Value.BigIntSpace = undefined;
- const operand_bigint = val.toBigInt(&buffer, target);
+ const operand_bigint = val.toBigInt(&buffer, mod);
 const limbs = try arena.alloc(
 std.math.big.Limb,
@@ -1895,7 +1929,8 @@
 /// Asserts the value is an integer and not undefined.
 /// Returns the number of bits the value requires to represent stored in twos complement form.
- pub fn intBitCountTwosComp(self: Value, target: Target) usize {
+ pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize {
+ const target = mod.getTarget();
 switch (self.tag()) {
 .zero,
 .bool_false,
@@ -1926,7 +1961,7 @@
 else => {
 var buffer: BigIntSpace = undefined;
- return self.toBigInt(&buffer, target).bitCountTwosComp();
+ return self.toBigInt(&buffer, mod).bitCountTwosComp();
 },
 }
 }
@@ -1962,12 +1997,13 @@
 };
 }
- pub fn orderAgainstZero(lhs: Value) std.math.Order {
- return orderAgainstZeroAdvanced(lhs, null) catch unreachable;
+ pub fn orderAgainstZero(lhs: Value, mod: *const Module) std.math.Order {
+ return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable;
 }
 pub fn orderAgainstZeroAdvanced(
 lhs: Value,
+ mod: *const Module,
 opt_sema: ?*Sema,
 ) Module.CompileError!std.math.Order {
 return switch (lhs.tag()) {
@@ -1991,7 +2027,7 @@
 // This is needed to correctly handle hashing the value.
 // Checks in Sema should prevent direct comparisons from reaching here.
 const val = lhs.castTag(.runtime_value).?.data;
- return val.orderAgainstZeroAdvanced(opt_sema);
+ return val.orderAgainstZeroAdvanced(mod, opt_sema);
 },
 .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
 .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0),
@@ -2001,7 +2037,7 @@
 .lazy_align => {
 const ty = lhs.castTag(.lazy_align).?.data;
 const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
- if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
 error.NeedLazy => unreachable,
 else => |e| return e,
 }) {
@@ -2013,7 +2049,7 @@
 .lazy_size => {
 const ty = lhs.castTag(.lazy_size).?.data;
 const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager;
- if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) {
+ if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) {
 error.NeedLazy => unreachable,
 else => |e| return e,
 }) {
@@ -2031,7 +2067,7 @@
 .elem_ptr => {
 const elem_ptr = lhs.castTag(.elem_ptr).?.data;
- switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(opt_sema)) {
+ switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) {
 .lt => unreachable,
 .gt => return .gt,
 .eq => {
@@ -2049,17 +2085,17 @@
 }
 /// Asserts the value is comparable.
- pub fn order(lhs: Value, rhs: Value, target: Target) std.math.Order {
- return orderAdvanced(lhs, rhs, target, null) catch unreachable;
+ pub fn order(lhs: Value, rhs: Value, mod: *const Module) std.math.Order {
+ return orderAdvanced(lhs, rhs, mod, null) catch unreachable;
 }
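// Sketch of a caller update for the comparison helpers (assuming two
// integer Values `a` and `b`, and `mod` in scope):
//
//     const a_is_less = a.order(b, mod) == .lt; // previously: a.order(b, target)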
 /// Asserts the value is comparable.
 /// If opt_sema is null then this function asserts things are resolved and cannot fail.
- pub fn orderAdvanced(lhs: Value, rhs: Value, target: Target, opt_sema: ?*Sema) !std.math.Order {
+ pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *const Module, opt_sema: ?*Sema) !std.math.Order {
 const lhs_tag = lhs.tag();
 const rhs_tag = rhs.tag();
- const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(opt_sema);
- const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(opt_sema);
+ const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema);
+ const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema);
 switch (lhs_against_zero) {
 .lt => if (rhs_against_zero != .lt) return .lt,
 .eq => return rhs_against_zero.invert(),
@@ -2093,22 +2129,22 @@
 var lhs_bigint_space: BigIntSpace = undefined;
 var rhs_bigint_space: BigIntSpace = undefined;
- const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, target, opt_sema);
- const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, target, opt_sema);
+ const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema);
+ const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema);
 return lhs_bigint.order(rhs_bigint);
 }
 /// Asserts the value is comparable. Does not take a type parameter because it supports
 /// comparisons between heterogeneous types.
- pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, target: Target) bool {
- return compareHeteroAdvanced(lhs, op, rhs, target, null) catch unreachable;
+ pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *const Module) bool {
+ return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable;
 }
 pub fn compareHeteroAdvanced(
 lhs: Value,
 op: std.math.CompareOperator,
 rhs: Value,
- target: Target,
+ mod: *const Module,
 opt_sema: ?*Sema,
 ) !bool {
 if (lhs.pointerDecl()) |lhs_decl| {
@@ -2132,20 +2168,20 @@
 else => {},
 }
 }
- return (try orderAdvanced(lhs, rhs, target, opt_sema)).compare(op);
+ return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op);
 }
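// Sketch of the vector rule compareAll implements below: the scalar
// comparison must hold for every element (hypothetical 3-element case):
//
//     // { 1, 2, 3 } < { 2, 3, 4 } => true  (all elements satisfy <)
//     // { 1, 2, 3 } < { 2, 2, 4 } => false (element 1 fails)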
 /// Asserts the values are comparable. Both operands have type `ty`.
 /// For vectors, returns true if comparison is true for ALL elements.
 pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
- if (ty.zigTypeTag() == .Vector) {
+ if (ty.zigTypeTag(mod) == .Vector) {
 var i: usize = 0;
 while (i < ty.vectorLen()) : (i += 1) {
 var lhs_buf: Value.ElemValueBuffer = undefined;
 var rhs_buf: Value.ElemValueBuffer = undefined;
 const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
 const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
- if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(), mod)) {
+ if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod), mod)) {
 return false;
 }
 }
@@ -2165,7 +2201,7 @@
 return switch (op) {
 .eq => lhs.eql(rhs, ty, mod),
 .neq => !lhs.eql(rhs, ty, mod),
- else => compareHetero(lhs, op, rhs, mod.getTarget()),
+ else => compareHetero(lhs, op, rhs, mod),
 };
 }
@@ -2231,7 +2267,7 @@
 .float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op == .neq,
 else => {},
 }
- return (try orderAgainstZeroAdvanced(lhs, opt_sema)).compare(op);
+ return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op);
 }
 pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
@@ -2346,7 +2382,7 @@
 return true;
 }
- if (ty.zigTypeTag() == .Struct) {
+ if (ty.zigTypeTag(mod) == .Struct) {
 const fields = ty.structFields().values();
 assert(fields.len == a_field_vals.len);
 for (fields, 0..) |field, i| {
@@ -2406,12 +2442,10 @@
 return false;
 }
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
 .Type => {
- var buf_a: ToTypeBuffer = undefined;
- var buf_b: ToTypeBuffer = undefined;
- const a_type = a.toType(&buf_a);
- const b_type = b.toType(&buf_b);
+ const a_type = a.toType();
+ const b_type = b.toType();
 return a_type.eql(b_type, mod);
 },
 .Enum => {
@@ -2419,8 +2453,7 @@
 var buf_b: Payload.U64 = undefined;
 const a_val = a.enumToInt(ty, &buf_a);
 const b_val = b.enumToInt(ty, &buf_b);
- var buf_ty: Type.Payload.Bits = undefined;
- const int_ty = ty.intTagType(&buf_ty);
+ const int_ty = ty.intTagType();
 return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema);
 },
 .Array, .Vector => {
@@ -2466,11 +2499,11 @@
 // .the_one_possible_value,
 // .aggregate,
 // Note that we already checked above for matching tags, e.g. both .aggregate.
- return ty.onePossibleValue() != null;
+ return ty.onePossibleValue(mod) != null;
 },
 .Union => {
 // Here we have to check for value equality, as-if `a` has been coerced to `ty`.
- if (ty.onePossibleValue() != null) {
+ if (ty.onePossibleValue(mod) != null) {
 return true;
 }
 if (a_ty.castTag(.anon_struct)) |payload| {
@@ -2533,13 +2566,13 @@
 else => {},
 }
 if (a_tag == .null_value or a_tag == .@"error") return false;
- return (try orderAdvanced(a, b, target, opt_sema)).compare(.eq);
+ return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq);
 }
 /// This function is used by hash maps and so treats floating-point NaNs as equal
 /// to each other, and not equal to other floating-point values.
 pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
- const zig_ty_tag = ty.zigTypeTag();
+ const zig_ty_tag = ty.zigTypeTag(mod);
 std.hash.autoHash(hasher, zig_ty_tag);
 if (val.isUndef()) return;
 // The value is runtime-known and shouldn't affect the hash.
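// Sketch of the guard pair used by the hashing paths here: undef and
// runtime-known values contribute nothing beyond the type tag, so two
// runtime values of the same type hash identically.
//
//     if (val.isUndef()) return;
//     if (val.tag() == .runtime_value) return;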
@@ -2555,8 +2588,7 @@
 => {},
 .Type => {
- var buf: ToTypeBuffer = undefined;
- return val.toType(&buf).hashWithHasher(hasher, mod);
+ return val.toType().hashWithHasher(hasher, mod);
 },
 .Float => {
 // For hash/eql purposes, we treat floats as their IEEE integer representation.
@@ -2588,7 +2620,7 @@
 hash(slice.len, Type.usize, hasher, mod);
 },
- else => return hashPtr(val, hasher, mod.getTarget()),
+ else => return hashPtr(val, hasher, mod),
 },
 .Array, .Vector => {
 const len = ty.arrayLen();
@@ -2648,7 +2680,7 @@
 .Enum => {
 var enum_space: Payload.U64 = undefined;
 const int_val = val.enumToInt(ty, &enum_space);
- hashInt(int_val, hasher, mod.getTarget());
+ hashInt(int_val, hasher, mod);
 },
 .Union => {
 const union_obj = val.cast(Payload.Union).?.data;
@@ -2691,7 +2723,7 @@
 // The value is runtime-known and shouldn't affect the hash.
 if (val.tag() == .runtime_value) return;
- switch (ty.zigTypeTag()) {
+ switch (ty.zigTypeTag(mod)) {
 .Opaque => unreachable, // Cannot hash opaque types
 .Void,
 .NoReturn,
@@ -2700,8 +2732,7 @@
 .Struct, // It sure would be nice to do something clever with structs.
 => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag),
 .Type => {
- var buf: ToTypeBuffer = undefined;
- val.toType(&buf).hashWithHasher(hasher, mod);
+ val.toType().hashWithHasher(hasher, mod);
 },
 .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
 .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
@@ -2711,7 +2742,7 @@
 const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
 slice.ptr.hashUncoerced(ptr_ty, hasher, mod);
 },
- else => val.hashPtr(hasher, mod.getTarget()),
+ else => val.hashPtr(hasher, mod),
 },
 .Array, .Vector => {
 const len = ty.arrayLen();
@@ -2821,16 +2852,16 @@
 };
 }
- fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
+ fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void {
 var buffer: BigIntSpace = undefined;
- const big = int_val.toBigInt(&buffer, target);
+ const big = int_val.toBigInt(&buffer, mod);
 std.hash.autoHash(hasher, big.positive);
 for (big.limbs) |limb| {
 std.hash.autoHash(hasher, limb);
 }
 }
- fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, target: Target) void {
+ fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void {
 switch (ptr_val.tag()) {
 .decl_ref,
 .decl_ref_mut,
@@ -2847,25 +2878,25 @@
 .elem_ptr => {
 const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
- hashPtr(elem_ptr.array_ptr, hasher, target);
+ hashPtr(elem_ptr.array_ptr, hasher, mod);
 std.hash.autoHash(hasher, Value.Tag.elem_ptr);
 std.hash.autoHash(hasher, elem_ptr.index);
 },
 .field_ptr => {
 const field_ptr = ptr_val.castTag(.field_ptr).?.data;
 std.hash.autoHash(hasher, Value.Tag.field_ptr);
- hashPtr(field_ptr.container_ptr, hasher, target);
+ hashPtr(field_ptr.container_ptr, hasher, mod);
 std.hash.autoHash(hasher, field_ptr.field_index);
 },
 .eu_payload_ptr => {
 const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
 std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr);
- hashPtr(err_union_ptr.container_ptr, hasher, target);
+ hashPtr(err_union_ptr.container_ptr, hasher, mod);
 },
 .opt_payload_ptr => {
 const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
 std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr);
- hashPtr(opt_ptr.container_ptr, hasher, target);
hashPtr(opt_ptr.container_ptr, hasher, target); + hashPtr(opt_ptr.container_ptr, hasher, mod); }, .zero, @@ -2880,7 +2911,7 @@ pub const Value = extern union { .the_only_possible_value, .lazy_align, .lazy_size, - => return hashInt(ptr_val, hasher, target), + => return hashInt(ptr_val, hasher, mod), else => unreachable, } @@ -2897,11 +2928,11 @@ pub const Value = extern union { pub fn sliceLen(val: Value, mod: *Module) u64 { return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod.getTarget()), + .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod), .decl_ref => { const decl_index = val.castTag(.decl_ref).?.data; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Array) { + if (decl.ty.zigTypeTag(mod) == .Array) { return decl.ty.arrayLen(); } else { return 1; @@ -2910,7 +2941,7 @@ pub const Value = extern union { .decl_ref_mut => { const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Array) { + if (decl.ty.zigTypeTag(mod) == .Array) { return decl.ty.arrayLen(); } else { return 1; @@ -2918,7 +2949,7 @@ pub const Value = extern union { }, .comptime_field_ptr => { const payload = val.castTag(.comptime_field_ptr).?.data; - if (payload.field_ty.zigTypeTag() == .Array) { + if (payload.field_ty.zigTypeTag(mod) == .Array) { return payload.field_ty.arrayLen(); } else { return 1; @@ -3003,7 +3034,7 @@ pub const Value = extern union { if (data.container_ptr.pointerDecl()) |decl_index| { const container_decl = mod.declPtr(decl_index); const field_type = data.container_ty.structFieldType(data.field_index); - const field_val = container_decl.val.fieldValue(field_type, data.field_index); + const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); return field_val.elemValueAdvanced(mod, index, arena, buffer); } else unreachable; }, @@ -3032,10 +3063,7 @@ pub const Value = extern union { } /// Returns true if a Value is backed by a variable - pub fn isVariable( - val: Value, - mod: *Module, - ) bool { + pub fn isVariable(val: Value, mod: *Module) bool { return switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), @@ -3119,7 +3147,7 @@ pub const Value = extern union { }; } - pub fn fieldValue(val: Value, ty: Type, index: usize) Value { + pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value { switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -3131,14 +3159,14 @@ pub const Value = extern union { return payload.val; }, - .the_only_possible_value => return ty.onePossibleValue().?, + .the_only_possible_value => return ty.onePossibleValue(mod).?, .empty_struct_value => { if (ty.isSimpleTupleOrAnonStruct()) { const tuple = ty.tupleFields(); return tuple.values[index]; } - if (ty.structFieldValueComptime(index)) |some| { + if (ty.structFieldValueComptime(mod, index)) |some| { return some; } unreachable; @@ -3165,7 +3193,7 @@ pub const Value = extern union { index: usize, mod: *Module, ) Allocator.Error!Value { - const elem_ty = ty.elemType2(); + const elem_ty = ty.elemType2(mod); const ptr_val = switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr, else => val, @@ -3207,7 +3235,7 @@ pub const Value = extern union { switch (self.tag()) { .slice => { const payload = self.castTag(.slice).?; - const len = payload.data.len.toUnsignedInt(mod.getTarget()); + const len = 
payload.data.len.toUnsignedInt(mod); var elem_value_buf: ElemValueBuffer = undefined; var i: usize = 0; @@ -3233,7 +3261,7 @@ pub const Value = extern union { /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. - pub fn isNull(self: Value) bool { + pub fn isNull(self: Value, mod: *const Module) bool { return switch (self.tag()) { .null_value => true, .opt_payload => false, @@ -3254,7 +3282,7 @@ pub const Value = extern union { .int_i64, .int_big_positive, .int_big_negative, - => self.orderAgainstZero().compare(.eq), + => self.orderAgainstZero(mod).compare(.eq), .undef => unreachable, .unreachable_value => unreachable, @@ -3300,8 +3328,8 @@ pub const Value = extern union { } /// Value of the optional, null if optional has no payload. - pub fn optionalValue(val: Value) ?Value { - if (val.isNull()) return null; + pub fn optionalValue(val: Value, mod: *const Module) ?Value { + if (val.isNull(mod)) return null; // Valid for optional representation to be the direct value // and not use opt_payload. @@ -3333,20 +3361,20 @@ pub const Value = extern union { } pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - const target = mod.getTarget(); - if (int_ty.zigTypeTag() == .Vector) { + if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, int_ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema); + scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(mod), mod, opt_sema); } return Value.Tag.aggregate.create(arena, result_data); } - return intToFloatScalar(val, arena, float_ty, target, opt_sema); + return intToFloatScalar(val, arena, float_ty, mod, opt_sema); } - pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, target: Target, opt_sema: ?*Sema) !Value { + pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { + const target = mod.getTarget(); switch (val.tag()) { .undef, .zero, .one => return val, .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 @@ -3369,17 +3397,17 @@ pub const Value = extern union { .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target); + return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); } else { - return intToFloatInner(ty.abiAlignment(target), arena, float_ty, target); + return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target); } }, .lazy_size => { const ty = val.castTag(.lazy_size).?.data; if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target); + return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); } else { - return intToFloatInner(ty.abiSize(target), arena, float_ty, target); + return intToFloatInner(ty.abiSize(mod), arena, float_ty, target); } }, else => unreachable, @@ -3446,19 +3474,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == 
.Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intAddSatScalar(lhs, rhs, ty, arena, target); + return intAddSatScalar(lhs, rhs, ty, arena, mod); } /// Supports integers only; asserts neither operand is undefined. @@ -3467,17 +3494,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3495,19 +3522,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intSubSatScalar(lhs, rhs, ty, arena, target); + return intSubSatScalar(lhs, rhs, ty, arena, mod); } /// Supports integers only; asserts neither operand is undefined. @@ -3516,17 +3542,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3543,8 +3569,7 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !OverflowArithmeticResult { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try arena.alloc(Value, ty.vectorLen()); const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -3552,7 +3577,7 @@ pub const Value = extern union { var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -3561,7 +3586,7 @@ pub const Value = extern union { .wrapped_result = try Value.Tag.aggregate.create(arena, result_data), }; } - return intMulWithOverflowScalar(lhs, rhs, ty, arena, target); + return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod); } pub fn intMulWithOverflowScalar( @@ -3569,14 +3594,14 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -3607,14 +3632,14 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3631,7 +3656,7 @@ pub const Value = extern union { ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + if (ty.zigTypeTag(mod) == .ComptimeInt) { return intMul(lhs, rhs, ty, arena, mod); } @@ -3651,19 +3676,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intMulSatScalar(lhs, rhs, ty, arena, target); + return intMulSatScalar(lhs, rhs, ty, arena, mod); } /// Supports (vectors of) integers only; asserts neither operand is undefined. 
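// A minimal sketch, not part of the patch, of the vector/scalar split that
// every arithmetic helper in this file follows after the change: the type tag
// and scalar type are resolved through `mod`, vectors map the scalar variant
// element-wise and are rebuilt as an aggregate, and scalars fall through
// directly. `binOp`/`binOpScalar` are hypothetical stand-ins for any of the
// wrapper/scalar pairs above; keeping the loop in the wrapper leaves each
// `*Scalar` helper free of vector concerns.
pub fn binOp(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
    if (ty.zigTypeTag(mod) == .Vector) {
        const result_data = try arena.alloc(Value, ty.vectorLen());
        for (result_data, 0..) |*scalar, i| {
            var lhs_buf: Value.ElemValueBuffer = undefined;
            var rhs_buf: Value.ElemValueBuffer = undefined;
            const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
            const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
            scalar.* = try binOpScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
        }
        return Value.Tag.aggregate.create(arena, result_data);
    }
    return binOpScalar(lhs, rhs, ty, arena, mod);
}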
@@ -3672,17 +3696,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.max( @@ -3702,24 +3726,24 @@ pub const Value = extern union { } /// Supports both floats and ints; handles undefined. - pub fn numberMax(lhs: Value, rhs: Value, target: Target) Value { + pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { if (lhs.isUndef() or rhs.isUndef()) return undef; if (lhs.isNan()) return rhs; if (rhs.isNan()) return lhs; - return switch (order(lhs, rhs, target)) { + return switch (order(lhs, rhs, mod)) { .lt => rhs, .gt, .eq => lhs, }; } /// Supports both floats and ints; handles undefined. - pub fn numberMin(lhs: Value, rhs: Value, target: Target) Value { + pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { if (lhs.isUndef() or rhs.isUndef()) return undef; if (lhs.isNan()) return rhs; if (rhs.isNan()) return lhs; - return switch (order(lhs, rhs, target)) { + return switch (order(lhs, rhs, mod)) { .lt => lhs, .gt, .eq => rhs, }; @@ -3727,24 +3751,23 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target); + scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return bitwiseNotScalar(val, ty, arena, target); + return bitwiseNotScalar(val, ty, arena, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, target: Target) !Value { + pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (val.isUndef()) return Value.initTag(.undef); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.bits == 0) { return val; @@ -3753,7 +3776,7 @@ pub const Value = extern union { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, target); + const val_bigint = val.toBigInt(&val_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3766,31 +3789,30 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseAndScalar(lhs, rhs, allocator, target); + return bitwiseAndScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives @@ -3803,14 +3825,14 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3823,41 +3845,40 @@ pub const Value = extern union { const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - const all_ones = if (ty.isSignedInt()) + const all_ones = if (ty.isSignedInt(mod)) try Value.Tag.int_i64.create(arena, -1) else - try ty.maxInt(arena, mod.getTarget()); + try ty.maxInt(arena, mod); return bitwiseXor(anded, all_ones, ty, arena, mod); } /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseOrScalar(lhs, rhs, allocator, target); + return bitwiseOrScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. 
- pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), @@ -3869,31 +3890,30 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseXorScalar(lhs, rhs, allocator, target); + return bitwiseXorScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives @@ -3905,28 +3925,27 @@ pub const Value = extern union { } pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivScalar(lhs, rhs, allocator, target); + return intDivScalar(lhs, rhs, allocator, mod); } - pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -3946,28 +3965,27 @@ pub const Value = extern union { } pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivFloorScalar(lhs, rhs, allocator, target); + return intDivFloorScalar(lhs, rhs, allocator, mod); } - pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -3987,28 +4005,27 @@ pub const Value = extern union { } pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intModScalar(lhs, rhs, allocator, target); + return intModScalar(lhs, rhs, allocator, mod); } - pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -4064,14 +4081,14 @@ pub const Value = extern union { pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4111,14 +4128,14 @@ pub const Value = extern union { pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4157,28 +4174,27 @@ pub const Value = extern union { } pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intMulScalar(lhs, rhs, allocator, target); + return intMulScalar(lhs, rhs, allocator, mod); } - pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -4194,17 +4210,16 @@ pub const Value = extern union { } pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target); + scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, bits, target); + return intTruncScalar(val, allocator, signedness, bits, mod); } /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. @@ -4216,26 +4231,25 @@ pub const Value = extern union { bits: Value, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); var bits_buf: Value.ElemValueBuffer = undefined; const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(target)), target); + scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(target)), target); + return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); } - pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value { + pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (bits == 0) return Value.zero; var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, target); + const val_bigint = val.toBigInt(&val_space, mod); const limbs = try allocator.alloc( std.math.big.Limb, @@ -4248,27 +4262,26 @@ pub const Value = extern union { } pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shlScalar(lhs, rhs, allocator, target); + return shlScalar(lhs, rhs, allocator, mod); } - pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -4289,8 +4302,7 @@ pub const Value = extern union { allocator: Allocator, mod: *Module, ) !OverflowArithmeticResult { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try allocator.alloc(Value, ty.vectorLen()); const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -4298,7 +4310,7 @@ pub const Value = extern union { var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), allocator, target); + const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -4307,7 +4319,7 @@ pub const Value = extern union { .wrapped_result = try Value.Tag.aggregate.create(allocator, result_data), }; } - return shlWithOverflowScalar(lhs, rhs, ty, allocator, target); + return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod); } pub fn shlWithOverflowScalar( @@ -4315,12 +4327,12 @@ pub const Value = extern union { rhs: Value, ty: Type, allocator: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -4348,19 +4360,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return shlSatScalar(lhs, rhs, ty, arena, target); + return shlSatScalar(lhs, rhs, ty, arena, mod); } pub fn shlSatScalar( @@ -4368,15 +4379,15 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, @@ -4397,14 +4408,14 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4419,33 +4430,32 @@ pub const Value = extern union { mod: *Module, ) !Value { const shifted = try lhs.shl(rhs, ty, arena, mod); - const int_info = ty.intInfo(mod.getTarget()); + const int_info = ty.intInfo(mod); const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod); return truncated; } pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shrScalar(lhs, rhs, allocator, target); + return shrScalar(lhs, rhs, allocator, mod); } - pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); if (result_limbs == 0) { @@ -4478,12 +4488,12 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4514,14 +4524,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4573,14 +4583,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4632,14 +4642,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4691,14 +4701,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4744,12 +4754,12 @@ pub const Value = extern union { pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4784,12 +4794,12 @@ pub const Value = extern union { pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4824,12 +4834,12 @@ pub const Value = extern union { pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4864,12 +4874,12 @@ pub const Value = extern union { pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4904,12 +4914,12 @@ pub const Value = extern union { pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4944,12 +4954,12 @@ pub const Value = extern union { pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4984,12 +4994,12 @@ pub const Value = extern union { pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5024,12 +5034,12 @@ pub const Value = extern union { pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5064,12 +5074,12 @@ pub const Value = extern union { pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5104,12 +5114,12 @@ pub const Value = extern union { pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5144,12 +5154,12 @@ pub const Value = extern union { pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5184,12 +5194,12 @@ pub const Value = extern union { pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5224,12 +5234,12 @@ pub const Value = extern union { pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5264,12 +5274,12 @@ pub const Value = extern union { pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5311,7 +5321,7 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var mulend1_buf: Value.ElemValueBuffer = undefined; @@ -5321,7 +5331,7 @@ pub const Value = extern union { var addend_buf: Value.ElemValueBuffer = undefined; const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf); scalar.* = try mulAddScalar( - float_type.scalarType(), + float_type.scalarType(mod), mulend1_elem, mulend2_elem, addend_elem, @@ -5380,8 +5390,7 @@ pub const Value = extern union { /// If the value is represented in-memory as a series of bytes that all /// have the same value, return that byte value, otherwise null. 
pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value { - const target = mod.getTarget(); - const abi_size = std.math.cast(usize, ty.abiSize(target)) orelse return null; + const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null; assert(abi_size >= 1); const byte_buffer = try mod.gpa.alloc(u8, abi_size); defer mod.gpa.free(byte_buffer); @@ -5549,16 +5558,6 @@ pub const Value = extern union { data: Type, }; - pub const IntType = struct { - pub const base_tag = Tag.int_type; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - bits: u16, - signed: bool, - }, - }; - pub const Float_16 = struct { pub const base_tag = Tag.float_16; @@ -5659,7 +5658,10 @@ pub const Value = extern union { pub const zero = initTag(.zero); pub const one = initTag(.one); - pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base }; + pub const negative_one: Value = .{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &negative_one_payload.base }, + }; pub const undef = initTag(.undef); pub const @"void" = initTag(.void_value); pub const @"null" = initTag(.null_value); From cac60a05a7c253e197b26b7d707103feefb850f9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 1 May 2023 20:14:17 -0700 Subject: [PATCH 002/205] std.builtin: give some enums integer types --- lib/std/builtin.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index ec69270d1520..ff6d20370ca4 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -190,7 +190,7 @@ pub const CallingConvention = enum { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. -pub const AddressSpace = enum { +pub const AddressSpace = enum(u4) { generic, gs, fs, @@ -283,7 +283,7 @@ pub const Type = union(enum) { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. - pub const Size = enum { + pub const Size = enum(u2) { One, Many, Slice, From c7e84ddb722df0403dd6ae8283820c02efc77e50 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 1 May 2023 20:17:21 -0700 Subject: [PATCH 003/205] InternPool: flesh out some of the implementations * hashing * equality * encoding --- src/InternPool.zig | 395 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 358 insertions(+), 37 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index b835315e5a1f..cc3f0c1e4b43 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3,6 +3,9 @@ map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, +/// On 32-bit systems, this array is ignored and extra is used for everything. +/// On 64-bit systems, this array is used for big integers and associated metadata. 
+limbs: std.ArrayListUnmanaged(u64) = .{}, const std = @import("std"); const Allocator = std.mem.Allocator; @@ -91,19 +94,43 @@ pub const Key = union(enum) { } pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void { + const KeyTag = @typeInfo(Key).Union.tag_type.?; + const key_tag: KeyTag = key; + std.hash.autoHash(hasher, key_tag); switch (key) { - .int_type => |int_type| { - std.hash.autoHash(hasher, int_type); + inline .int_type, + .ptr_type, + .array_type, + .vector_type, + .optional_type, + .error_union_type, + .simple_type, + .simple_value, + .extern_func, + => |info| std.hash.autoHash(hasher, info), + + .int => |int| { + std.hash.autoHash(hasher, int.ty); + std.hash.autoHash(hasher, int.big_int.positive); + for (int.big_int.limbs) |limb| std.hash.autoHash(hasher, limb); }, - .array_type => |array_type| { - std.hash.autoHash(hasher, array_type); + + .enum_tag => |enum_tag| { + std.hash.autoHash(hasher, enum_tag.ty); + std.hash.autoHash(hasher, enum_tag.tag.positive); + for (enum_tag.tag.limbs) |limb| std.hash.autoHash(hasher, limb); + }, + + .struct_type => |struct_type| { + if (struct_type.fields_len != 0) { + @panic("TODO"); + } }, - else => @panic("TODO"), } } pub fn eql(a: Key, b: Key) bool { - const KeyTag = std.meta.Tag(Key); + const KeyTag = @typeInfo(Key).Union.tag_type.?; const a_tag: KeyTag = a; const b_tag: KeyTag = b; if (a_tag != b_tag) return false; @@ -112,11 +139,62 @@ pub const Key = union(enum) { const b_info = b.int_type; return std.meta.eql(a_info, b_info); }, + .ptr_type => |a_info| { + const b_info = b.ptr_type; + return std.meta.eql(a_info, b_info); + }, .array_type => |a_info| { const b_info = b.array_type; return std.meta.eql(a_info, b_info); }, - else => @panic("TODO"), + .vector_type => |a_info| { + const b_info = b.vector_type; + return std.meta.eql(a_info, b_info); + }, + .optional_type => |a_info| { + const b_info = b.optional_type; + return std.meta.eql(a_info, b_info); + }, + .error_union_type => |a_info| { + const b_info = b.error_union_type; + return std.meta.eql(a_info, b_info); + }, + .simple_type => |a_info| { + const b_info = b.simple_type; + return a_info == b_info; + }, + .simple_value => |a_info| { + const b_info = b.simple_value; + return a_info == b_info; + }, + .extern_func => |a_info| { + const b_info = b.extern_func; + return std.meta.eql(a_info, b_info); + }, + + .int => |a_info| { + const b_info = b.int; + _ = a_info; + _ = b_info; + @panic("TODO"); + }, + + .enum_tag => |a_info| { + const b_info = b.enum_tag; + _ = a_info; + _ = b_info; + @panic("TODO"); + }, + + .struct_type => |a_info| { + const b_info = b.struct_type; + + // TODO: remove this special case for empty_struct + if (a_info.fields_len == 0 and b_info.fields_len == 0) + return true; + + @panic("TODO"); + }, } } @@ -491,27 +569,65 @@ pub const Tag = enum(u8) { /// An integer type. /// data is number of bits type_int_unsigned, - /// An array type. - /// data is payload to Array. + /// An array type that has no sentinel and whose length fits in 32 bits. + /// data is payload to Vector. type_array, + /// A vector type. + /// data is payload to Vector. + type_vector, + /// A pointer type along with all its bells and whistles. + /// data is payload to Pointer. + type_pointer, + /// An optional type. + /// data is the child type. + type_optional, + /// An error union type. + /// data is payload to ErrorUnion. + type_error_union, + /// Represents the data that an enum declaration provides, when the fields + /// are auto-numbered, and there are no declarations. 
+ /// data is payload index to `EnumSimple`. + type_enum_simple, + /// A type that can be represented with only an enum tag. /// data is SimpleType enum value. simple_type, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// An unsigned integer value that can be represented by u32. + /// The SimpleType and SimpleValue enums are exposed via the InternPool API using + /// SimpleType and SimpleValue as the Key data themselves. + /// This tag is for miscellaneous types and values that can be represented with + /// only an enum tag, but will be presented via the API with a different Key. + /// data is SimpleInternal enum value. + simple_internal, + /// Type: u32 /// data is integer value - int_u32, - /// An unsigned integer value that can be represented by i32. + int_small_u32, + /// Type: i32 /// data is integer value bitcasted to u32. - int_i32, - /// A positive integer value that does not fit in 32 bits. - /// data is a extra index to BigInt. - int_big_positive, - /// A negative integer value that does not fit in 32 bits. - /// data is a extra index to BigInt. - int_big_negative, + int_small_i32, + /// A usize that fits in 32 bits. + /// data is integer value. + int_small_usize, + /// A comptime_int that fits in a u32. + /// data is integer value. + int_small_comptime_unsigned, + /// A comptime_int that fits in an i32. + /// data is integer value bitcasted to u32. + int_small_comptime_signed, + /// A positive integer value. + /// data is a limbs index to Int. + int_positive, + /// A negative integer value. + /// data is a limbs index to Int. + int_negative, + /// An enum tag identified by a positive integer value. + /// data is a limbs index to Int. + enum_tag_positive, + /// An enum tag identified by a negative integer value. + /// data is a limbs index to Int. + enum_tag_negative, /// A float value that can be represented by f32. /// data is float value bitcasted to u32. float_f32, @@ -525,10 +641,6 @@ pub const Tag = enum(u8) { extern_func, /// A regular function. func, - /// Represents the data that an enum declaration provides, when the fields - /// are auto-numbered, and there are no declarations. - /// data is payload index to `EnumSimple`. - enum_simple, }; /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to @@ -593,11 +705,43 @@ pub const SimpleValue = enum(u32) { generic_poison, }; -pub const Array = struct { +pub const SimpleInternal = enum(u32) { + /// This is the empty struct type. Note that empty_struct value is exposed + /// via SimpleValue. + type_empty_struct, +}; + +pub const Pointer = struct { + child: Index, + sentinel: Index, + flags: Flags, + + pub const Flags = packed struct(u32) { + alignment: u16, + is_const: bool, + is_volatile: bool, + is_allowzero: bool, + size: Size, + address_space: AddressSpace, + _: u7 = undefined, + }; + + pub const Size = std.builtin.Type.Pointer.Size; + pub const AddressSpace = std.builtin.AddressSpace; +}; + +/// Used for non-sentineled arrays that have length fitting in u32, as well as +/// vectors. +pub const Vector = struct { len: u32, child: Index, }; +pub const ErrorUnion = struct { + error_set_type: Index, + payload_type: Index, +}; + /// Trailing: /// 0. 
field name: null-terminated string index for each fields_len; declaration order pub const EnumSimple = struct { @@ -612,6 +756,12 @@ pub const EnumSimple = struct { fields_len: u32, }; +/// Trailing: Limb for every limbs_len +pub const Int = struct { + ty: Index, + limbs_len: u32, +}; + pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.items.len == 0); @@ -619,6 +769,7 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { try ip.items.ensureUnusedCapacity(gpa, static_keys.len); try ip.map.ensureUnusedCapacity(gpa, static_keys.len); try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); + try ip.limbs.ensureUnusedCapacity(gpa, 2); // This inserts all the statically-known values into the intern pool in the // order expected. @@ -635,6 +786,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.map.deinit(gpa); ip.items.deinit(gpa); ip.extra.deinit(gpa); + ip.limbs.deinit(gpa); ip.* = undefined; } @@ -655,7 +807,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }, }, .type_array => { - const array_info = ip.extraData(Array, data); + const array_info = ip.extraData(Vector, data); return .{ .array_type = .{ .len = array_info.len, .child = array_info.child, @@ -675,58 +827,226 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (gop.found_existing) { return @intToEnum(Index, gop.index); } + try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { const t: Tag = switch (int_type.signedness) { .signed => .type_int_signed, .unsigned => .type_int_unsigned, }; - try ip.items.append(gpa, .{ + ip.items.appendAssumeCapacity(.{ .tag = t, .data = int_type.bits, }); }, + .ptr_type => |ptr_type| { + // TODO introduce more pointer encodings + ip.items.appendAssumeCapacity(.{ + .tag = .type_pointer, + .data = try ip.addExtra(gpa, Pointer{ + .child = ptr_type.elem_type, + .sentinel = ptr_type.sentinel, + .flags = .{ + .alignment = ptr_type.alignment, + .is_const = ptr_type.is_const, + .is_volatile = ptr_type.is_volatile, + .is_allowzero = ptr_type.is_allowzero, + .size = ptr_type.size, + .address_space = ptr_type.address_space, + }, + }), + }); + }, .array_type => |array_type| { const len = @intCast(u32, array_type.len); // TODO have a big_array encoding assert(array_type.sentinel == .none); // TODO have a sentinel_array encoding - try ip.items.append(gpa, .{ + ip.items.appendAssumeCapacity(.{ .tag = .type_array, - .data = try ip.addExtra(gpa, Array{ + .data = try ip.addExtra(gpa, Vector{ .len = len, .child = array_type.child, }), }); }, - else => @panic("TODO"), + .vector_type => |vector_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_vector, + .data = try ip.addExtra(gpa, Vector{ + .len = vector_type.len, + .child = vector_type.child, + }), + }); + }, + .optional_type => |optional_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_optional, + .data = @enumToInt(optional_type.payload_type), + }); + }, + .error_union_type => |error_union_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_union, + .data = try ip.addExtra(gpa, ErrorUnion{ + .error_set_type = error_union_type.error_set_type, + .payload_type = error_union_type.payload_type, + }), + }); + }, + .simple_type => |simple_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .simple_type, + .data = @enumToInt(simple_type), + }); + }, + .simple_value => |simple_value| { + ip.items.appendAssumeCapacity(.{ + .tag = .simple_value, + .data = @enumToInt(simple_value), + }); + }, + .extern_func => @panic("TODO"), + + .int => |int| b: { + 
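+            // Fast paths: common integer types whose value fits in 32 bits are
+            // stored directly in `data`; anything else falls through to the
+            // limb-based encoding below.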
switch (int.ty) { + .u32_type => { + if (int.big_int.fits(u32)) { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small_u32, + .data = int.big_int.to(u32) catch unreachable, + }); + break :b; + } + }, + .i32_type => { + if (int.big_int.fits(i32)) { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small_i32, + .data = @bitCast(u32, int.big_int.to(i32) catch unreachable), + }); + break :b; + } + }, + .usize_type => { + if (int.big_int.fits(u32)) { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small_usize, + .data = int.big_int.to(u32) catch unreachable, + }); + break :b; + } + }, + .comptime_int_type => { + if (int.big_int.fits(u32)) { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small_comptime_unsigned, + .data = int.big_int.to(u32) catch unreachable, + }); + break :b; + } + if (int.big_int.fits(i32)) { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small_comptime_signed, + .data = @bitCast(u32, int.big_int.to(i32) catch unreachable), + }); + break :b; + } + }, + else => {}, + } + + const tag: Tag = if (int.big_int.positive) .int_positive else .int_negative; + try addInt(ip, gpa, int.ty, tag, int.big_int.limbs); + }, + + .enum_tag => |enum_tag| { + const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative; + try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs); + }, + + .struct_type => |struct_type| { + if (struct_type.fields_len != 0) { + @panic("TODO"); // handle structs other than empty_struct + } + ip.items.appendAssumeCapacity(.{ + .tag = .simple_internal, + .data = @enumToInt(SimpleInternal.type_empty_struct), + }); + }, } return @intToEnum(Index, ip.items.len - 1); } -pub fn tag(ip: InternPool, index: Index) Tag { - const tags = ip.items.items(.tag); - return tags[@enumToInt(index)]; +fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const usize) !void { + const limbs_len = @intCast(u32, limbs.len); + try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = ip.addLimbsExtraAssumeCapacity(Int{ + .ty = ty, + .limbs_len = limbs_len, + }), + }); + ip.addLimbsAssumeCapacity(limbs); } fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { - const fields = std.meta.fields(@TypeOf(extra)); + const fields = @typeInfo(@TypeOf(extra)).Struct.fields; try ip.extra.ensureUnusedCapacity(gpa, fields.len); return ip.addExtraAssumeCapacity(extra); } fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { - const fields = std.meta.fields(@TypeOf(extra)); const result = @intCast(u32, ip.extra.items.len); - inline for (fields) |field| { + inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Index => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - else => @compileError("bad field type"), + Pointer.Flags => @bitCast(u32, @field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } return result; } +fn reserveLimbs(ip: *InternPool, gpa: Allocator, n: usize) !void { + switch (@sizeOf(usize)) { + @sizeOf(u32) => try ip.extra.ensureUnusedCapacity(gpa, n), + @sizeOf(u64) => try ip.limbs.ensureUnusedCapacity(gpa, n), + else => @compileError("unsupported host"), + } +} + +fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { + switch (@sizeOf(usize)) { + @sizeOf(u32) => return addExtraAssumeCapacity(ip, extra), + @sizeOf(u64) => {}, + else => 
@compileError("unsupported host"), + } + const result = @intCast(u32, ip.extra.items.len); + inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) |field, i| { + const new: u32 = switch (field.type) { + u32 => @field(extra, field.name), + Index => @enumToInt(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + if (i % 2 == 0) { + ip.limbs.appendAssumeCapacity(new); + } else { + ip.limbs.items[ip.limbs.items.len - 1] |= @as(u64, new) << 32; + } + } + return result; +} + +fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const usize) void { + switch (@sizeOf(usize)) { + @sizeOf(u32) => ip.extra.appendSliceAssumeCapacity(limbs), + @sizeOf(u64) => ip.limbs.appendSliceAssumeCapacity(limbs), + else => @compileError("unsupported host"), + } +} + fn extraData(ip: InternPool, comptime T: type, index: usize) T { const fields = std.meta.fields(T); var i: usize = index; @@ -736,7 +1056,8 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T { u32 => ip.extra.items[i], Index => @intToEnum(Index, ip.extra.items[i]), i32 => @bitCast(i32, ip.extra.items[i]), - else => @compileError("bad field type"), + Pointer.Flags => @bitCast(Pointer.Flags, ip.extra.items[i]), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; } From 00f82f1c46126f1fc6655c6142ef16e8e5afbf4e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 2 May 2023 14:37:33 -0700 Subject: [PATCH 004/205] stage2: add `interned` AIR tag This required additionally passing the `InternPool` into some AIR methods. Also, implement `Type.isNoReturn` for interned types. --- src/Air.zig | 58 ++++--- src/InternPool.zig | 6 + src/Liveness.zig | 18 ++- src/Liveness/Verify.zig | 10 +- src/Module.zig | 3 +- src/Sema.zig | 92 ++--------- src/arch/aarch64/CodeGen.zig | 172 ++++++++++---------- src/arch/arm/CodeGen.zig | 166 ++++++++++--------- src/arch/riscv64/CodeGen.zig | 80 ++++++---- src/arch/sparc64/CodeGen.zig | 136 ++++++++-------- src/arch/wasm/CodeGen.zig | 240 +++++++++++++++------------- src/arch/x86_64/CodeGen.zig | 286 +++++++++++++++++---------------- src/codegen/c.zig | 268 ++++++++++++++++--------------- src/codegen/llvm.zig | 299 ++++++++++++++++++----------------- src/codegen/spirv.zig | 119 +++++++------- src/print_air.zig | 18 ++- src/type.zig | 40 +++-- 17 files changed, 1050 insertions(+), 961 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index b60e8eda9ddb..be3ae119e49b 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -11,6 +11,7 @@ const Air = @This(); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const InternPool = @import("InternPool.zig"); +const Module = @import("Module.zig"); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. @@ -401,6 +402,9 @@ pub const Inst = struct { constant, /// A comptime-known type. Uses the `ty` field. const_ty, + /// A comptime-known value via an index into the InternPool. + /// Uses the `interned` field. + interned, /// Notes the beginning of a source code statement and marks the line and column. /// Result type is always void. /// Uses the `dbg_stmt` field. 
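The subtle part of the InternPool encoding above is `addLimbsExtraAssumeCapacity`: on 64-bit hosts the u32 header fields of a limbs payload such as `Int` are packed two per u64 limb (field 0 in the low half, field 1 in the high half), with the big-integer limbs appended directly after the header. A minimal standalone sketch of just that packing; `Header` and `packHeader` are illustrative names rather than compiler API:

    const std = @import("std");

    // Two u32 header fields share one u64 limb, low half first,
    // mirroring the 64-bit branch of addLimbsExtraAssumeCapacity.
    const Header = struct { ty: u32, limbs_len: u32 };

    fn packHeader(limbs: *std.ArrayList(u64), h: Header) !u32 {
        const index = @intCast(u32, limbs.items.len);
        try limbs.append(@as(u64, h.ty) | (@as(u64, h.limbs_len) << 32));
        return index;
    }

    test "two u32 fields share one u64 limb" {
        var limbs = std.ArrayList(u64).init(std.testing.allocator);
        defer limbs.deinit();
        const index = try packHeader(&limbs, .{ .ty = 7, .limbs_len = 3 });
        try std.testing.expectEqual(@as(u64, 7 | (3 << 32)), limbs.items[index]);
    }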
@@ -928,6 +932,7 @@ pub const Inst = struct { pub const Data = union { no_op: void, un_op: Ref, + interned: InternPool.Index, bin_op: struct { lhs: Ref, @@ -1147,18 +1152,15 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { +pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { const ref_int = @enumToInt(inst); if (ref_int < InternPool.static_keys.len) { - return .{ - .ip_index = InternPool.static_keys[ref_int].typeOf(), - .legacy = undefined, - }; + return InternPool.static_keys[ref_int].typeOf().toType(); } - return air.typeOfIndex(ref_int - ref_start_index); + return air.typeOfIndex(ref_int - ref_start_index, ip); } -pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { const datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst]) { .add, @@ -1200,7 +1202,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .div_exact_optimized, .rem_optimized, .mod_optimized, - => return air.typeOf(datas[inst].bin_op.lhs), + => return air.typeOf(datas[inst].bin_op.lhs, ip), .sqrt, .sin, @@ -1218,7 +1220,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .trunc_float, .neg, .neg_optimized, - => return air.typeOf(datas[inst].un_op), + => return air.typeOf(datas[inst].un_op, ip), .cmp_lt, .cmp_lte, @@ -1280,6 +1282,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .try_ptr, => return air.getRefType(datas[inst].ty_pl.ty), + .interned => return ip.indexToKey(datas[inst].interned).typeOf().toType(), + .not, .bitcast, .load, @@ -1371,33 +1375,33 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), .call, .call_always_tail, .call_never_tail, .call_never_inline => { - const callee_ty = air.typeOf(datas[inst].pl_op.operand); + const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); return callee_ty.fnReturnType(); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { - const ptr_ty = air.typeOf(datas[inst].bin_op.lhs); + const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip); return ptr_ty.elemType(); }, .atomic_load => { - const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr); + const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip); return ptr_ty.elemType(); }, .atomic_rmw => { - const ptr_ty = air.typeOf(datas[inst].pl_op.operand); + const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip); return ptr_ty.elemType(); }, - .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand).childType(), + .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand, ip).childType(), - .mul_add => return air.typeOf(datas[inst].pl_op.operand), + .mul_add => return air.typeOf(datas[inst].pl_op.operand, ip), .select => { const extra = air.extraData(Air.Bin, datas[inst].pl_op.payload).data; - return air.typeOf(extra.lhs); + return air.typeOf(extra.lhs, ip); }, .@"try" => { - const err_union_ty = air.typeOf(datas[inst].pl_op.operand); + const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip); return err_union_ty.errorUnionPayload(); }, @@ -1465,7 +1469,7 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } /// Returns `null` if runtime-known. 
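/// Refs numerically below `ref_start_index` are interned constants and are
/// resolved directly through the InternPool.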
-pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?Value { +pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const Module) ?Value { const ref_int = @enumToInt(inst); if (ref_int < ref_start_index) { const ip_index = @intToEnum(InternPool.Index, ref_int); @@ -1476,7 +1480,7 @@ pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?V switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, - else => return air.typeOfIndex(inst_index).onePossibleValue(mod), + else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } } @@ -1489,10 +1493,11 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { return bytes[0..end :0]; } -/// Returns whether the given instruction must always be lowered, for instance because it can cause -/// side effects. If an instruction does not need to be lowered, and Liveness determines its result -/// is unused, backends should avoid lowering it. -pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { +/// Returns whether the given instruction must always be lowered, for instance +/// because it can cause side effects. If an instruction does not need to be +/// lowered, and Liveness determines its result is unused, backends should +/// avoid lowering it. +pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { const data = air.instructions.items(.data)[inst]; return switch (air.instructions.items(.tag)[inst]) { .arg, @@ -1631,6 +1636,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { .cmp_vector_optimized, .constant, .const_ty, + .interned, .is_null, .is_non_null, .is_null_ptr, @@ -1699,8 +1705,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { => false, .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, - .load => air.typeOf(data.ty_op.operand).isVolatilePtr(), - .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(), - .atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(), + .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtr(), + .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtr(), + .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtr(), }; } diff --git a/src/InternPool.zig b/src/InternPool.zig index cc3f0c1e4b43..eadaf0da5eeb 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -241,6 +241,11 @@ pub const Item = struct { /// When adding a tag to this enum, consider adding a corresponding entry to /// `primitives` in AstGen.zig. pub const Index = enum(u32) { + pub const first_type: Index = .u1_type; + pub const last_type: Index = .empty_struct_type; + pub const first_value: Index = .undef; + pub const last_value: Index = .empty_struct; + u1_type, u8_type, i8_type, @@ -329,6 +334,7 @@ pub const Index = enum(u32) { bool_false, /// `.{}` (untyped) empty_struct, + /// Used for generic parameters where the type and value /// is not known until generic function instantiation. 
generic_poison, diff --git a/src/Liveness.zig b/src/Liveness.zig index 45d0705008f9..01fbee9e3608 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -131,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type { }; } -pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -144,6 +144,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .intern_pool = intern_pool, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -322,6 +323,7 @@ pub fn categorizeOperand( .ret_ptr, .constant, .const_ty, + .interned, .trap, .breakpoint, .dbg_stmt, @@ -820,6 +822,7 @@ pub const BigTomb = struct { const Analysis = struct { gpa: Allocator, air: Air, + intern_pool: *const InternPool, tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), @@ -971,6 +974,7 @@ fn analyzeInst( .constant, .const_ty, + .interned, => unreachable, .trap, @@ -1255,6 +1259,7 @@ fn analyzeOperands( ) Allocator.Error!void { const gpa = a.gpa; const inst_tags = a.air.instructions.items(.tag); + const ip = a.intern_pool; switch (pass) { .loop_analysis => { @@ -1265,7 +1270,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty => continue, + .constant, .const_ty, .interned => continue, else => {}, } @@ -1290,7 +1295,7 @@ fn analyzeOperands( // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. - if (!immediate_death or a.air.mustLower(inst)) { + if (!immediate_death or a.air.mustLower(inst, ip.*)) { // Note that it's important we iterate over the operands backwards, so that if a dying // operand is used multiple times we mark its last use as its death. var i = operands.len; @@ -1301,7 +1306,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty => continue, + .constant, .const_ty, .interned => continue, else => {}, } @@ -1821,6 +1826,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { /// Must be called with operands in reverse order. fn feed(big: *Self, op_ref: Air.Inst.Ref) !void { + const ip = big.a.intern_pool; // Note that after this, `operands_remaining` becomes the index of the current operand big.operands_remaining -= 1; @@ -1834,14 +1840,14 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // Don't compute any liveness for constants const inst_tags = big.a.air.instructions.items(.tag); switch (inst_tags[operand]) { - .constant, .const_ty => return, + .constant, .const_ty, .interned => return, else => {}, } // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. 
- if (big.will_die_immediately and !big.a.air.mustLower(big.inst)) return; + if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip.*)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index a55ebe52a690..e05f1814ceb9 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -5,6 +5,7 @@ air: Air, liveness: Liveness, live: LiveMap = .{}, blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{}, +intern_pool: *const InternPool, pub const Error = error{ LivenessInvalid, OutOfMemory }; @@ -27,10 +28,11 @@ pub fn verify(self: *Verify) Error!void { const LiveMap = std.AutoHashMapUnmanaged(Air.Inst.Index, void); fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { + const ip = self.intern_pool; const tag = self.air.instructions.items(.tag); const data = self.air.instructions.items(.data); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) { // This instruction will not be lowered and should be ignored. continue; } @@ -42,6 +44,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .ret_ptr, .constant, .const_ty, + .interned, .breakpoint, .dbg_stmt, .dbg_inline_begin, @@ -554,7 +557,7 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void { const operand = Air.refToIndex(op_ref) orelse return; switch (self.air.instructions.items(.tag)[operand]) { - .constant, .const_ty => {}, + .constant, .const_ty, .interned => {}, else => { if (dies) { if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand }); @@ -576,7 +579,7 @@ fn verifyInst( } const tag = self.air.instructions.items(.tag); switch (tag[inst]) { - .constant, .const_ty => unreachable, + .constant, .const_ty, .interned => unreachable, else => { if (self.liveness.isUnused(inst)) { assert(!self.live.contains(inst)); @@ -604,4 +607,5 @@ const log = std.log.scoped(.liveness_verify); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const InternPool = @import("../InternPool.zig"); const Verify = @This(); diff --git a/src/Module.zig b/src/Module.zig index 5756955d3c1c..4187ac206b5e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4397,7 +4397,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_air and !dump_llvm_ir) return; log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); defer liveness.deinit(gpa); if (dump_air) { @@ -4414,6 +4414,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { .gpa = gpa, .air = air, .liveness = liveness, + .intern_pool = &mod.intern_pool, }; defer verify.deinit(); diff --git a/src/Sema.zig b/src/Sema.zig index 9b76fee68e59..540474c84ab3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33007,7 +33007,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { /// Returns the type of the AIR instruction. 
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst); + return sema.getTmpAir().typeOf(inst, sema.mod.intern_pool); } pub fn getTmpAir(sema: Sema) Air { @@ -33019,88 +33019,14 @@ pub fn getTmpAir(sema: Sema) Air { } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { - switch (ty.ip_index) { - .u1_type => return .u1_type, - .u8_type => return .u8_type, - .i8_type => return .i8_type, - .u16_type => return .u16_type, - .i16_type => return .i16_type, - .u29_type => return .u29_type, - .u32_type => return .u32_type, - .i32_type => return .i32_type, - .u64_type => return .u64_type, - .i64_type => return .i64_type, - .u80_type => return .u80_type, - .u128_type => return .u128_type, - .i128_type => return .i128_type, - .usize_type => return .usize_type, - .isize_type => return .isize_type, - .c_char_type => return .c_char_type, - .c_short_type => return .c_short_type, - .c_ushort_type => return .c_ushort_type, - .c_int_type => return .c_int_type, - .c_uint_type => return .c_uint_type, - .c_long_type => return .c_long_type, - .c_ulong_type => return .c_ulong_type, - .c_longlong_type => return .c_longlong_type, - .c_ulonglong_type => return .c_ulonglong_type, - .c_longdouble_type => return .c_longdouble_type, - .f16_type => return .f16_type, - .f32_type => return .f32_type, - .f64_type => return .f64_type, - .f80_type => return .f80_type, - .f128_type => return .f128_type, - .anyopaque_type => return .anyopaque_type, - .bool_type => return .bool_type, - .void_type => return .void_type, - .type_type => return .type_type, - .anyerror_type => return .anyerror_type, - .comptime_int_type => return .comptime_int_type, - .comptime_float_type => return .comptime_float_type, - .noreturn_type => return .noreturn_type, - .anyframe_type => return .anyframe_type, - .null_type => return .null_type, - .undefined_type => return .undefined_type, - .enum_literal_type => return .enum_literal_type, - .atomic_order_type => return .atomic_order_type, - .atomic_rmw_op_type => return .atomic_rmw_op_type, - .calling_convention_type => return .calling_convention_type, - .address_space_type => return .address_space_type, - .float_mode_type => return .float_mode_type, - .reduce_op_type => return .reduce_op_type, - .call_modifier_type => return .call_modifier_type, - .prefetch_options_type => return .prefetch_options_type, - .export_options_type => return .export_options_type, - .extern_options_type => return .extern_options_type, - .type_info_type => return .type_info_type, - .manyptr_u8_type => return .manyptr_u8_type, - .manyptr_const_u8_type => return .manyptr_const_u8_type, - .single_const_pointer_to_comptime_int_type => return .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type => return .const_slice_u8_type, - .anyerror_void_error_union_type => return .anyerror_void_error_union_type, - .generic_poison_type => return .generic_poison_type, - .var_args_param_type => return .var_args_param_type, - .empty_struct_type => return .empty_struct_type, - - // values - .undef => unreachable, - .zero => unreachable, - .zero_usize => unreachable, - .one => unreachable, - .one_usize => unreachable, - .calling_convention_c => unreachable, - .calling_convention_inline => unreachable, - .void_value => unreachable, - .unreachable_value => unreachable, - .null_value => unreachable, - .bool_true => unreachable, - .bool_false => unreachable, - .empty_struct => unreachable, - .generic_poison => unreachable, - - _ => {}, - - .none => unreachable, + if (ty.ip_index != .none) { + if 
(@enumToInt(ty.ip_index) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(ty.ip_index)); + try sema.air_instructions.append(sema.gpa, .{ + .tag = .interned, + .data = .{ .interned = ty.ip_index }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } switch (ty.tag()) { .u1 => return .u1_type, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 4370977272a0..284663327503 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -521,7 +521,7 @@ fn gen(self: *Self) !void { const inst = self.air.getMainBody()[arg_index]; assert(self.air.instructions.items(.tag)[inst] == .arg); - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const abi_size = @intCast(u32, ty.abiSize(mod)); const abi_align = ty.abiAlignment(mod); @@ -653,13 +653,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -845,6 +846,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -1028,7 +1030,7 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).elemType(); if (!elem_ty.hasRuntimeBits(mod)) { // return the stack offset 0. 
Stack offset 0 will be where all @@ -1067,7 +1069,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); + const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst); log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -1079,14 +1081,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Save the current instruction stored in the compare flags if /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.compare_flags_inst) |inst_to_save| { - const ty = self.air.typeOfIndex(inst_to_save); + const ty = self.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { .compare_flags => try self.allocRegOrMem(ty, true, inst_to_save), @@ -1094,7 +1096,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1126,9 +1128,9 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const raw_reg = try self.register_manager.allocReg(reg_owner, gp); - const ty = self.air.typeOfIndex(reg_owner); + const ty = self.typeOfIndex(reg_owner); const reg = self.registerAlias(raw_reg, ty); - try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); + try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -1181,10 +1183,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const operand = ty_op.operand; const operand_mcv = try self.resolveInst(operand); - const operand_ty = self.air.typeOf(operand); + const operand_ty = self.typeOf(operand); const operand_info = operand_ty.intInfo(mod); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_info = dest_ty.intInfo(mod); const result: MCValue = result: { @@ -1201,14 +1203,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (dest_info.bits > operand_info.bits) { const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated); + try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated); break :result dest_mcv; } else { if (self.reuseOperand(inst, operand, 0, truncated)) { break :result truncated; } else { const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated); + try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated); break :result dest_mcv; } } @@ -1303,8 +1305,8 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand, operand_ty, dest_ty); @@ -1325,7 +1327,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, @@ -1492,8 +1494,8 @@ fn minMax( fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1512,9 +1514,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const 
len_ty = self.typeOf(bin_op.rhs); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -2436,8 +2438,8 @@ fn ptrArithmetic( fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -2487,8 +2489,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -2525,10 +2527,10 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -2653,10 +2655,10 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -2877,10 +2879,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -3013,7 +3015,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if 
(self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOf(ty_op.operand); + const optional_ty = self.typeOf(ty_op.operand); const mcv = try self.resolveInst(ty_op.operand); break :result try self.optionalPayload(inst, mcv, optional_ty); }; @@ -3132,7 +3134,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionErr(error_union_bind, error_union_ty, inst); }; @@ -3212,7 +3214,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst); }; @@ -3266,12 +3268,12 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { } const result: MCValue = result: { - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBits(mod)) { break :result MCValue{ .immediate = 1 }; } - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const operand_lock: ?RegisterLock = switch (operand) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -3433,7 +3435,7 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -3482,8 +3484,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const slice_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const slice_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null); break :result addr; @@ -3499,7 +3501,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -3516,8 +3518,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const ptr_ty = 
self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const ptr_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null); break :result addr; @@ -3862,16 +3864,16 @@ fn genInlineMemsetCode( } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); const mod = self.bin_file.options.module.?; + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const elem_ty = self.typeOfIndex(inst); const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -3886,7 +3888,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(elem_ty, true, inst); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -4068,8 +4070,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -4093,7 +4095,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde return if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); + const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -4118,7 +4120,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); + const struct_ty = self.typeOf(operand); const struct_field_ty = struct_ty.structFieldType(index); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); @@ -4194,7 +4196,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[inst]; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); @@ -4247,7 +4249,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, 
self.air.extra[extra.end..][0..extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -4294,7 +4296,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -4470,7 +4472,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const ret_ty = self.fn_type.fnReturnType(); switch (self.ret_mcv) { @@ -4512,7 +4514,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op); @@ -4652,7 +4654,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -4804,7 +4806,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -4831,7 +4833,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
-            try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
+            try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value);
             // TODO track the new register / stack allocation
         }
 
@@ -4936,7 +4938,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
         break :result try self.isNull(.{ .mcv = operand }, operand_ty);
     };
 
@@ -4947,7 +4949,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const elem_ty = ptr_ty.elemType();
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -4962,7 +4964,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
         break :result try self.isNonNull(.{ .mcv = operand }, operand_ty);
     };
 
@@ -4973,7 +4975,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const elem_ty = ptr_ty.elemType();
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -4988,7 +4990,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
-        const error_union_ty = self.air.typeOf(un_op);
+        const error_union_ty = self.typeOf(un_op);
         break :result try self.isErr(error_union_bind, error_union_ty);
     };
 
@@ -4999,7 +5001,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const elem_ty = ptr_ty.elemType();
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -5014,7 +5016,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
-        const error_union_ty = self.air.typeOf(un_op);
+        const error_union_ty = self.typeOf(un_op);
         break :result try self.isNonErr(error_union_bind, error_union_ty);
     };
 
@@ -5025,7 +5027,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const elem_ty = ptr_ty.elemType();
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -5093,7 +5095,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const condition_ty = self.air.typeOf(pl_op.operand);
+    const condition_ty = self.typeOf(pl_op.operand);
     const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
     const liveness = try self.liveness.getSwitchBr(
         self.gpa,
@@ -5241,7 +5243,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
     const mod = self.bin_file.options.module.?;
     const block_data = self.blocks.getPtr(block).?;
 
-    if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
+    if (self.typeOf(operand).hasRuntimeBits(mod)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -5249,14 +5251,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
                 .none, .dead, .unreach => unreachable,
                 .register, .stack_offset, .memory => operand_mcv,
                 .immediate, .stack_argument_offset, .compare_flags => blk: {
-                    const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block);
-                    try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+                    const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block);
+                    try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv);
                     break :blk new_mcv;
                 },
                 else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
             };
         } else {
-            try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+            try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
         }
     }
     return self.brVoid(block);
@@ -5322,7 +5324,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 
             const arg_mcv = try self.resolveInst(input);
             try self.register_manager.getReg(reg, null);
-            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+            try self.genSetReg(self.typeOf(input), reg, arg_mcv);
         }
 
         {
@@ -5945,7 +5947,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest = try self.allocRegOrMem(dest_ty, true, inst);
         try self.setRegOrMem(dest_ty, dest, operand);
         break :result dest;
@@ -5956,7 +5958,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_ty = self.air.typeOf(ty_op.operand);
+        const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
         const array_ty = ptr_ty.childType();
         const array_len = @intCast(u32, array_ty.arrayLen());
@@ -6076,7 +6078,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
-    const vector_ty = self.air.typeOfIndex(inst);
+    const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen();
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
@@ -6125,7 +6127,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
     const body = self.air.extra[extra.end..][0..extra.data.body_len];
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
-        const error_union_ty = self.air.typeOf(pl_op.operand);
+        const error_union_ty = self.typeOf(pl_op.operand);
         const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
         const error_union_align = error_union_ty.abiAlignment(mod);
 
@@ -6159,7 +6161,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
 
     // If the type has no codegen bits, no need to store it.
-    const inst_ty = self.air.typeOf(inst);
+    const inst_ty = self.typeOf(inst);
     if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
         return MCValue{ .none = {} };
 
@@ -6428,3 +6430,13 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
         },
     }
 }
+
+fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOf(inst, mod.intern_pool);
+}
+
+fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOfIndex(inst, mod.intern_pool);
+}
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 4c7151cd470a..eb8cfa97076a 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -477,6 +477,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }
 
 fn gen(self: *Self) !void {
+    const mod = self.bin_file.options.module.?;
     const cc = self.fn_type.fnCallingConvention();
     if (cc != .Naked) {
         // push {fp, lr}
@@ -518,9 +519,8 @@ fn gen(self: *Self) !void {
         const inst = self.air.getMainBody()[arg_index];
         assert(self.air.instructions.items(.tag)[inst] == .arg);
 
-        const ty = self.air.typeOfIndex(inst);
+        const ty = self.typeOfIndex(inst);
 
-        const mod = self.bin_file.options.module.?;
         const abi_size = @intCast(u32, ty.abiSize(mod));
         const abi_align = ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
@@ -637,13 +637,14 @@
 }
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+    const mod = self.bin_file.options.module.?;
+    const ip = &mod.intern_pool;
     const air_tags = self.air.instructions.items(.tag);
 
     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
             continue;
-        }
 
         const old_air_bookkeeping = self.air_bookkeeping;
         try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -829,6 +830,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
             .constant => unreachable, // excluded from function bodies
             .const_ty => unreachable, // excluded from function bodies
+            .interned => unreachable, // excluded from function bodies
             .unreach => self.finishAirBookkeeping(),
 
             .optional_payload => try self.airOptionalPayload(inst),
@@ -1008,7 +1010,7 @@ fn allocMem(
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.air.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).elemType();
 
     if (!elem_ty.hasRuntimeBits(mod)) {
         // As this stack item will never be dereferenced at runtime,
         // return the stack offset 0. Stack offset 0 will be where all
@@ -1050,7 +1052,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst
 }
 
 pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
-    const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst);
+    const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst);
     log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv });
 
     const reg_mcv = self.getResolvedInstValue(inst);
@@ -1064,14 +1066,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
     try branch.inst_table.put(self.gpa, inst, stack_mcv);
-    try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
+    try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
 }
 
 /// Save the current instruction stored in the compare flags if
 /// occupied
 fn spillCompareFlagsIfOccupied(self: *Self) !void {
     if (self.cpsr_flags_inst) |inst_to_save| {
-        const ty = self.air.typeOfIndex(inst_to_save);
+        const ty = self.typeOfIndex(inst_to_save);
         const mcv = self.getResolvedInstValue(inst_to_save);
         const new_mcv = switch (mcv) {
             .cpsr_flags => try self.allocRegOrMem(ty, true, inst_to_save),
@@ -1081,7 +1083,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
             else => unreachable, // mcv doesn't occupy the compare flags
         };
 
-        try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
+        try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv);
         log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
 
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1151,15 +1153,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
     const operand = try self.resolveInst(ty_op.operand);
-    const operand_ty = self.air.typeOf(ty_op.operand);
-    const dest_ty = self.air.typeOfIndex(inst);
+    const operand_ty = self.typeOf(ty_op.operand);
+    const dest_ty = self.typeOfIndex(inst);
 
-    const mod = self.bin_file.options.module.?;
     const operand_abi_size = operand_ty.abiSize(mod);
     const dest_abi_size = dest_ty.abiSize(mod);
     const info_a = operand_ty.intInfo(mod);
@@ -1262,8 +1264,8 @@ fn trunc(
 fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-    const operand_ty = self.air.typeOf(ty_op.operand);
-    const dest_ty = self.air.typeOfIndex(inst);
+    const operand_ty = self.typeOf(ty_op.operand);
+    const dest_ty = self.typeOfIndex(inst);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
         break :blk try self.trunc(inst, operand_bind, operand_ty, dest_ty);
@@ -1284,7 +1286,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         switch (try operand_bind.resolveToMcv(self)) {
             .dead => unreachable,
             .unreach => unreachable,
@@ -1467,8 +1469,8 @@ fn minMax(
 fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1487,9 +1489,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const len = try self.resolveInst(bin_op.rhs);
-        const len_ty = self.air.typeOf(bin_op.rhs);
+        const len_ty = self.typeOf(bin_op.rhs);
 
         const stack_offset = try self.allocMem(8, 4, inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -1501,8 +1503,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1552,8 +1554,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1590,10 +1592,10 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
 
-        const tuple_ty = self.air.typeOfIndex(inst);
+        const tuple_ty = self.typeOfIndex(inst);
         const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
         const tuple_align = tuple_ty.abiAlignment(mod);
         const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
@@ -1703,10 +1705,10 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
 
-        const tuple_ty = self.air.typeOfIndex(inst);
+        const tuple_ty = self.typeOfIndex(inst);
         const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
         const tuple_align = tuple_ty.abiAlignment(mod);
         const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
@@ -1865,10 +1867,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
     const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
 
-        const tuple_ty = self.air.typeOfIndex(inst);
+        const tuple_ty = self.typeOfIndex(inst);
         const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
         const tuple_align = tuple_ty.abiAlignment(mod);
         const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
@@ -2019,10 +2021,10 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const optional_ty = self.air.typeOfIndex(inst);
-        const mod = self.bin_file.options.module.?;
+        const optional_ty = self.typeOfIndex(inst);
         const abi_size = @intCast(u32, optional_ty.abiSize(mod));
 
         // Optional with a zero-bit payload type is just a boolean true
@@ -2105,7 +2107,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-        const error_union_ty = self.air.typeOf(ty_op.operand);
+        const error_union_ty = self.typeOf(ty_op.operand);
 
         break :result try self.errUnionErr(error_union_bind, error_union_ty, inst);
     };
@@ -2182,7 +2184,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-        const error_union_ty = self.air.typeOf(ty_op.operand);
+        const error_union_ty = self.typeOf(ty_op.operand);
 
         break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst);
     };
@@ -2430,7 +2432,7 @@ fn ptrElemVal(
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const slice_ty = self.air.typeOf(bin_op.lhs);
+    const slice_ty = self.typeOf(bin_op.lhs);
     const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
         var buf: Type.SlicePtrFieldTypeBuffer = undefined;
         const ptr_ty = slice_ty.slicePtrFieldType(&buf);
@@ -2456,8 +2458,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
         const base_bind: ReadArg.Bind = .{ .mcv = base_mcv };
         const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };
 
-        const slice_ty = self.air.typeOf(extra.lhs);
-        const index_ty = self.air.typeOf(extra.rhs);
+        const slice_ty = self.typeOf(extra.lhs);
+        const index_ty = self.typeOf(extra.rhs);
 
         const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null);
         break :result addr;
@@ -2523,7 +2525,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const array_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
-        const array_ty = self.air.typeOf(bin_op.lhs);
+        const array_ty = self.typeOf(bin_op.lhs);
 
         break :result try self.arrayElemVal(array_bind, index_bind, array_ty, inst);
     };
@@ -2532,7 +2534,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
     const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
         const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
@@ -2549,8 +2551,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
         const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };
 
-        const ptr_ty = self.air.typeOf(extra.lhs);
-        const index_ty = self.air.typeOf(extra.rhs);
+        const ptr_ty = self.typeOf(extra.lhs);
+        const index_ty = self.typeOf(extra.rhs);
 
         const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null);
         break :result addr;
@@ -2736,13 +2738,13 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
+    const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
         if (!elem_ty.hasRuntimeBits(mod))
             break :result MCValue.none;
 
         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr();
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;
@@ -2755,7 +2757,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(elem_ty, true, inst);
             }
         };
-        try self.load(dest_mcv, ptr, self.air.typeOf(ty_op.operand));
+        try self.load(dest_mcv, ptr, self.typeOf(ty_op.operand));
         break :result dest_mcv;
     };
@@ -2860,8 +2862,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr = try self.resolveInst(bin_op.lhs);
     const value = try self.resolveInst(bin_op.rhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const value_ty = self.air.typeOf(bin_op.rhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
+    const value_ty = self.typeOf(bin_op.rhs);
 
     try self.store(ptr, value, ptr_ty, value_ty);
 
@@ -2885,7 +2887,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     return if (self.liveness.isUnused(inst)) .dead else result: {
         const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
-        const ptr_ty = self.air.typeOf(operand);
+        const ptr_ty = self.typeOf(operand);
         const struct_ty = ptr_ty.childType();
         const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
         switch (mcv) {
@@ -2910,7 +2912,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const mcv = try self.resolveInst(operand);
-        const struct_ty = self.air.typeOf(operand);
+        const struct_ty = self.typeOf(operand);
         const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
         const struct_field_ty = struct_ty.structFieldType(index);
 
@@ -4169,7 +4171,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     while (self.args[arg_index] == .none) arg_index += 1;
     self.arg_index = arg_index + 1;
 
-    const ty = self.air.typeOfIndex(inst);
+    const ty = self.typeOfIndex(inst);
     const tag = self.air.instructions.items(.tag)[inst];
     const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
     const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
@@ -4222,7 +4224,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-    const ty = self.air.typeOf(callee);
+    const ty = self.typeOf(callee);
 
     const mod = self.bin_file.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -4276,7 +4278,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
     for (info.args, 0..) |mc_arg, arg_i| {
         const arg = args[arg_i];
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = self.typeOf(arg);
         const arg_mcv = try self.resolveInst(args[arg_i]);
 
         switch (mc_arg) {
@@ -4418,7 +4420,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const ptr = try self.resolveInst(un_op);
-    const ptr_ty = self.air.typeOf(un_op);
+    const ptr_ty = self.typeOf(un_op);
     const ret_ty = self.fn_type.fnReturnType();
 
     switch (self.ret_mcv) {
@@ -4461,7 +4463,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
         break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op);
@@ -4600,7 +4602,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const operand = pl_op.operand;
    const tag = self.air.instructions.items(.tag)[inst];
-    const ty = self.air.typeOf(operand);
+    const ty = self.typeOf(operand);
     const mcv = try self.resolveInst(operand);
     const name = self.air.nullTerminatedString(pl_op.payload);
 
@@ -4755,7 +4757,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
         log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
         // TODO make sure the destination stack offset / register does not already have something
         // going on there.
-        try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value);
+        try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value);
         // TODO track the new register / stack allocation
     }
     try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
@@ -4782,7 +4784,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
         log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
         // TODO make sure the destination stack offset / register does not already have something
        // going on there.
-        try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
+        try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value);
        // TODO track the new register / stack allocation
     }
 
@@ -4827,7 +4829,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = un_op };
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
 
         break :result try self.isNull(operand_bind, operand_ty);
     };
@@ -4838,7 +4840,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const elem_ty = ptr_ty.elemType();
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -4853,7 +4855,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = un_op };
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
 
         break :result try self.isNonNull(operand_bind, operand_ty);
     };
@@ -4864,7 +4866,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const elem_ty = ptr_ty.elemType();
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -4913,7 +4915,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
-        const error_union_ty = self.air.typeOf(un_op);
+        const error_union_ty = self.typeOf(un_op);
 
         break :result try self.isErr(error_union_bind, error_union_ty);
     };
@@ -4924,7 +4926,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const elem_ty = ptr_ty.elemType();
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -4939,7 +4941,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
-        const error_union_ty = self.air.typeOf(un_op);
+        const error_union_ty = self.typeOf(un_op);
 
         break :result try self.isNonErr(error_union_bind, error_union_ty);
     };
@@ -4950,7 +4952,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const elem_ty = ptr_ty.elemType();
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -5018,7 +5020,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const condition_ty = self.air.typeOf(pl_op.operand);
+    const condition_ty = self.typeOf(pl_op.operand);
     const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
     const liveness = try self.liveness.getSwitchBr(
         self.gpa,
@@ -5164,7 +5166,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
     const mod = self.bin_file.options.module.?;
     const block_data = self.blocks.getPtr(block).?;
 
-    if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
+    if (self.typeOf(operand).hasRuntimeBits(mod)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -5172,14 +5174,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
                 .none, .dead, .unreach => unreachable,
                 .register, .stack_offset, .memory => operand_mcv,
                 .immediate, .stack_argument_offset, .cpsr_flags => blk: {
-                    const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block);
-                    try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+                    const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block);
+                    try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv);
                     break :blk new_mcv;
                 },
                 else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
             };
         } else {
-            try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+            try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
         }
     }
     return self.brVoid(block);
@@ -5243,7 +5245,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 
             const arg_mcv = try self.resolveInst(input);
             try self.register_manager.getReg(reg, null);
-            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+            try self.genSetReg(self.typeOf(input), reg, arg_mcv);
         }
 
         {
@@ -5896,7 +5898,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest = try self.allocRegOrMem(dest_ty, true, inst);
         try self.setRegOrMem(dest_ty, dest, operand);
         break :result dest;
@@ -5907,7 +5909,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_ty = self.air.typeOf(ty_op.operand);
+        const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
         const array_ty = ptr_ty.childType();
         const array_len = @intCast(u32, array_ty.arrayLen());
@@ -6023,7 +6025,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
-    const vector_ty = self.air.typeOfIndex(inst);
+    const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen();
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
@@ -6072,7 +6074,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
     const body = self.air.extra[extra.end..][0..extra.data.body_len];
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
-        const error_union_ty = self.air.typeOf(pl_op.operand);
+        const error_union_ty = self.typeOf(pl_op.operand);
         const mod = self.bin_file.options.module.?;
         const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
         const error_union_align = error_union_ty.abiAlignment(mod);
@@ -6107,7 +6109,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
 
     // If the type has no codegen bits, no need to store it.
-    const inst_ty = self.air.typeOf(inst);
+    const inst_ty = self.typeOf(inst);
     if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
         return MCValue{ .none = {} };
 
@@ -6333,3 +6335,13 @@ fn parseRegName(name: []const u8) ?Register {
     }
     return std.meta.stringToEnum(Register, name);
 }
+
+fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOf(inst, mod.intern_pool);
+}
+
+fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOfIndex(inst, mod.intern_pool);
+}
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 75d5a87bf274..4ab798fe9c50 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -470,13 +470,14 @@ fn gen(self: *Self) !void {
 }
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+    const mod = self.bin_file.options.module.?;
+    const ip = &mod.intern_pool;
     const air_tags = self.air.instructions.items(.tag);
 
     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
             continue;
-        }
 
         const old_air_bookkeeping = self.air_bookkeeping;
         try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -658,6 +659,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
             .constant => unreachable, // excluded from function bodies
             .const_ty => unreachable, // excluded from function bodies
+            .interned => unreachable, // excluded from function bodies
             .unreach => self.finishAirBookkeeping(),
 
             .optional_payload => try self.airOptionalPayload(inst),
@@ -804,8 +806,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
-    const elem_ty = self.air.typeOfIndex(inst).elemType();
     const mod = self.bin_file.options.module.?;
+    const elem_ty = self.typeOfIndex(inst).elemType();
     const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
@@ -815,8 +817,8 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
 }
 
 fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
-    const elem_ty = self.air.typeOfIndex(inst);
     const mod = self.bin_file.options.module.?;
+    const elem_ty = self.typeOfIndex(inst);
     const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
@@ -845,7 +847,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
     assert(reg == reg_mcv.register);
     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
     try branch.inst_table.put(self.gpa, inst, stack_mcv);
-    try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
+    try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
 }
 
 /// Copies a value to a register without tracking the register. The register is not considered
@@ -862,7 +864,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
 
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
     const reg = try self.register_manager.allocReg(reg_owner, gp);
-    try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
+    try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv);
     return MCValue{ .register = reg };
 }
@@ -894,10 +896,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
     const mod = self.bin_file.options.module.?;
-    const operand_ty = self.air.typeOf(ty_op.operand);
+    const operand_ty = self.typeOf(ty_op.operand);
     const operand = try self.resolveInst(ty_op.operand);
     const info_a = operand_ty.intInfo(mod);
-    const info_b = self.air.typeOfIndex(inst).intInfo(mod);
+    const info_b = self.typeOfIndex(inst).intInfo(mod);
 
     if (info_a.signedness != info_b.signedness)
         return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
@@ -1126,8 +1128,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1138,8 +1140,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1333,7 +1335,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const mod = self.bin_file.options.module.?;
-        const optional_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOfIndex(inst);
 
         // Optional with a zero-bit payload type is just a boolean true
         if (optional_ty.abiSize(mod) == 1)
@@ -1525,15 +1527,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 }
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
+    const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
-        const mod = self.bin_file.options.module.?;
         if (!elem_ty.hasRuntimeBits(mod))
             break :result MCValue.none;
 
         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr();
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;
@@ -1545,7 +1547,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
+        try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand));
         break :result dst_mcv;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1586,8 +1588,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr = try self.resolveInst(bin_op.lhs);
     const value = try self.resolveInst(bin_op.rhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const value_ty = self.air.typeOf(bin_op.rhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
+    const value_ty = self.typeOf(bin_op.rhs);
 
     try self.store(ptr, value, ptr_ty, value_ty);
 
@@ -1647,7 +1649,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     const arg_index = self.arg_index;
     self.arg_index += 1;
 
-    const ty = self.air.typeOfIndex(inst);
+    const ty = self.typeOfIndex(inst);
     _ = ty;
 
     const result = self.args[arg_index];
@@ -1704,7 +1706,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const mod = self.bin_file.options.module.?;
     if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{});
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const fn_ty = self.air.typeOf(pl_op.operand);
+    const fn_ty = self.typeOf(pl_op.operand);
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
@@ -1717,7 +1719,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     if (self.bin_file.cast(link.File.Elf)) |elf_file| {
         for (info.args, 0..) |mc_arg, arg_i| {
             const arg = args[arg_i];
-            const arg_ty = self.air.typeOf(arg);
+            const arg_ty = self.typeOf(arg);
             const arg_mcv = try self.resolveInst(args[arg_i]);
 
             switch (mc_arg) {
@@ -1829,9 +1831,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
-    const ty = self.air.typeOf(bin_op.lhs);
+    const ty = self.typeOf(bin_op.lhs);
     const mod = self.bin_file.options.module.?;
-    assert(ty.eql(self.air.typeOf(bin_op.rhs), mod));
+    assert(ty.eql(self.typeOf(bin_op.rhs), mod));
 
     if (ty.zigTypeTag(mod) == .ErrorSet)
         return self.fail("TODO implement cmp for errors", .{});
@@ -1950,7 +1952,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+        try self.load(operand, operand_ptr, self.typeOf(un_op));
         break :result try self.isNull(operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -1977,7 +1979,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+        try self.load(operand, operand_ptr, self.typeOf(un_op));
         break :result try self.isNonNull(operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2004,7 +2006,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+        try self.load(operand, operand_ptr, self.typeOf(un_op));
         break :result try self.isErr(operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2031,7 +2033,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+        try self.load(operand, operand_ptr, self.typeOf(un_op));
         break :result try self.isNonErr(operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2112,13 +2114,13 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
     const block_data = self.blocks.getPtr(block).?;
     const mod = self.bin_file.options.module.?;
 
-    if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
+    if (self.typeOf(operand).hasRuntimeBits(mod)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
            block_data.mcv = operand_mcv;
         } else {
-            try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+            try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
         }
     }
     return self.brVoid(block);
@@ -2181,7 +2183,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 
             const arg_mcv = try self.resolveInst(input);
             try self.register_manager.getReg(reg, null);
-            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+            try self.genSetReg(self.typeOf(input), reg, arg_mcv);
         }
 
         {
@@ -2377,7 +2379,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
         const dest = try self.allocRegOrMem(inst, true);
-        try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
+        try self.setRegOrMem(self.typeOfIndex(inst), dest, operand);
         break :result dest;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -2494,7 +2496,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
-    const vector_ty = self.air.typeOfIndex(inst);
+    const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen();
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
@@ -2541,7 +2543,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
 
     // If the type has no codegen bits, no need to store it.
-    const inst_ty = self.air.typeOf(inst);
+    const inst_ty = self.typeOf(inst);
     if (!inst_ty.hasRuntimeBits(mod))
         return MCValue{ .none = {} };
 
@@ -2733,3 +2735,13 @@ fn parseRegName(name: []const u8) ?Register {
     }
     return std.meta.stringToEnum(Register, name);
 }
+
+fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOf(inst, mod.intern_pool);
+}
+
+fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOfIndex(inst, mod.intern_pool);
+}
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 63b604857e47..e79a216315ca 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -490,13 +490,14 @@ fn gen(self: *Self) !void {
 }
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+    const mod = self.bin_file.options.module.?;
+    const ip = &mod.intern_pool;
     const air_tags = self.air.instructions.items(.tag);
 
     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
             continue;
-        }
 
         const old_air_bookkeeping = self.air_bookkeeping;
         try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -678,6 +679,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
             .constant => unreachable, // excluded from function bodies
             .const_ty => unreachable, // excluded from function bodies
+            .interned => unreachable, // excluded from function bodies
             .unreach => self.finishAirBookkeeping(),
 
             .optional_payload => try self.airOptionalPayload(inst),
@@ -762,8 +764,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -836,7 +838,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
-    const vector_ty = self.air.typeOfIndex(inst);
+    const vector_ty = self.typeOfIndex(inst);
     const len = vector_ty.vectorLen();
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
@@ -871,7 +873,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_ty = self.air.typeOf(ty_op.operand);
+        const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
         const array_ty = ptr_ty.childType();
         const array_len = @intCast(u32, array_ty.arrayLen());
@@ -935,7 +937,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 
             const arg_mcv = try self.resolveInst(input);
             try self.register_manager.getReg(reg, null);
-            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+            try self.genSetReg(self.typeOf(input), reg, arg_mcv);
         }
 
         {
@@ -1008,16 +1010,16 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airArg(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const arg_index = self.arg_index;
     self.arg_index += 1;
 
-    const ty = self.air.typeOfIndex(inst);
+    const ty = self.typeOfIndex(inst);
     const arg = self.args[arg_index];
 
     const mcv = blk: {
         switch (arg) {
             .stack_offset => |off| {
-                const mod = self.bin_file.options.module.?;
                 const abi_size = math.cast(u32, ty.abiSize(mod)) orelse {
                     return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
                 };
@@ -1063,8 +1065,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst))
         .dead
    else
@@ -1088,8 +1090,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
     const result: MCValue = if (self.liveness.isUnused(inst))
         .dead
     else
@@ -1115,7 +1117,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
         const dest = try self.allocRegOrMem(inst, true);
-        try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
+        try self.setRegOrMem(self.typeOfIndex(inst), dest, operand);
         break :result dest;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1218,7 +1220,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
     // TODO: Fold byteswap+store into a single ST*A and load+byteswap into a single LD*A.
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         switch (operand_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO byteswap for vectors", .{}),
             .Int => {
@@ -1294,7 +1296,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]);
-    const ty = self.air.typeOf(callee);
+    const ty = self.typeOf(callee);
     const mod = self.bin_file.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
@@ -1318,7 +1320,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
     for (info.args, 0..) |mc_arg, arg_i| {
         const arg = args[arg_i];
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = self.typeOf(arg);
         const arg_mcv = try self.resolveInst(arg);
 
         switch (mc_arg) {
@@ -1428,7 +1430,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
 
         const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => unreachable, // Handled by cmp_vector.
@@ -1605,7 +1607,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
         log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
         // TODO make sure the destination stack offset / register does not already have something
         // going on there.
-        try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value);
+        try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value);
         // TODO track the new register / stack allocation
     }
     try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
@@ -1632,7 +1634,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
         log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
         // TODO make sure the destination stack offset / register does not already have something
         // going on there.
-        try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
+        try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value);
         // TODO track the new register / stack allocation
     }
 
@@ -1755,10 +1757,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
     const mod = self.bin_file.options.module.?;
-    const operand_ty = self.air.typeOf(ty_op.operand);
+    const operand_ty = self.typeOf(ty_op.operand);
     const operand = try self.resolveInst(ty_op.operand);
     const info_a = operand_ty.intInfo(mod);
-    const info_b = self.air.typeOfIndex(inst).intInfo(mod);
+    const info_b = self.typeOfIndex(inst).intInfo(mod);
 
     if (info_a.signedness != info_b.signedness)
         return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
@@ -1780,7 +1782,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(un_op);
-        const ty = self.air.typeOf(un_op);
+        const ty = self.typeOf(un_op);
         break :result try self.isErr(ty, operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -1790,7 +1792,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(un_op);
-        const ty = self.air.typeOf(un_op);
+        const ty = self.typeOf(un_op);
         break :result try self.isNonErr(ty, operand);
     };
     return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -1815,16 +1817,16 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
     const mod = self.bin_file.options.module.?;
+    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const elem_ty = self.typeOfIndex(inst);
     const elem_size = elem_ty.abiSize(mod);
     const result: MCValue = result: {
         if (!elem_ty.hasRuntimeBits(mod))
             break :result MCValue.none;
 
         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr();
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;
@@ -1839,7 +1841,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(inst, true);
             }
         };
-        try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
+        try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand));
         break :result dst_mcv;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1882,8 +1884,8 @@ fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result: MCValue = if (self.liveness.isUnused(inst))
         .dead
@@ -1897,8 +1899,8 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     assert(lhs_ty.eql(rhs_ty, self.bin_file.options.module.?));
     if (self.liveness.isUnused(inst))
@@ -2045,8 +2047,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -2108,7 +2110,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         switch (operand) {
             .dead => unreachable,
             .unreach => unreachable,
@@ -2285,8 +2287,8 @@ fn airRem(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     // TODO add safety check
 
@@ -2341,8 +2343,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
 
         switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -2429,9 +2431,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const len = try self.resolveInst(bin_op.rhs);
-        const len_ty = self.air.typeOf(bin_op.rhs);
+        const len_ty = self.typeOf(bin_op.rhs);
 
         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
@@ -2453,7 +2455,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const slice_mcv = try self.resolveInst(bin_op.lhs);
     const index_mcv = try self.resolveInst(bin_op.rhs);
 
-    const slice_ty = self.air.typeOf(bin_op.lhs);
+    const slice_ty = self.typeOf(bin_op.lhs);
     const elem_ty = slice_ty.childType();
     const mod = self.bin_file.options.module.?;
     const elem_size = elem_ty.abiSize(mod);
@@ -2544,8 +2546,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr = try self.resolveInst(bin_op.lhs);
     const value = try self.resolveInst(bin_op.rhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const value_ty = self.air.typeOf(bin_op.rhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
+    const value_ty = self.typeOf(bin_op.rhs);
 
     try self.store(ptr, value, ptr_ty, value_ty);
 
@@ -2573,7 +2575,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
-        const struct_ty = self.air.typeOf(operand);
+        const struct_ty = self.typeOf(operand);
         const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 
         switch (mcv) {
@@ -2659,8 +2661,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
 fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const operand = try self.resolveInst(ty_op.operand);
-    const operand_ty = self.air.typeOf(ty_op.operand);
-    const dest_ty = self.air.typeOfIndex(inst);
+    const operand_ty = self.typeOf(ty_op.operand);
+    const dest_ty = self.typeOfIndex(inst);
 
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
         break :blk try self.trunc(inst, operand, operand_ty, dest_ty);
@@ -2674,7 +2676,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.Try, pl_op.payload);
     const body = self.air.extra[extra.end..][0..extra.data.body_len];
     const result: MCValue = result: {
-        const error_union_ty = self.air.typeOf(pl_op.operand);
+        const error_union_ty = self.typeOf(pl_op.operand);
         const error_union = try self.resolveInst(pl_op.operand);
         const is_err_result = try self.isErr(error_union_ty, error_union);
         const reloc = try self.condBr(is_err_result);
@@ -2706,7 +2708,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const error_union_ty = self.air.typeOf(ty_op.operand);
+        const error_union_ty = self.typeOf(ty_op.operand);
         const payload_ty = error_union_ty.errorUnionPayload();
         const mcv = try self.resolveInst(ty_op.operand);
         const mod = self.bin_file.options.module.?;
@@ -2720,7 +2722,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const error_union_ty = self.air.typeOf(ty_op.operand);
+        const error_union_ty = self.typeOf(ty_op.operand);
         const payload_ty = error_union_ty.errorUnionPayload();
         const mod = self.bin_file.options.module.?;
         if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none;
@@ -2753,12 +2755,12 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const optional_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOfIndex(inst);
 
         // Optional with a zero-bit payload type is just a boolean true
-        const mod = self.bin_file.options.module.?;
         if (optional_ty.abiSize(mod) == 1)
             break :result MCValue{ .immediate = 1 };
 
@@ -2794,9 +2796,9 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
 
 /// Use a pointer instruction as the basis for allocating stack memory.
instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); - const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst).elemType(); + if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. Stack offset 0 will be where all @@ -2814,8 +2816,8 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = self.air.typeOfIndex(inst); const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst); const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; @@ -3406,7 +3408,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; const mod = self.bin_file.options.module.?; - if (self.air.typeOf(operand).hasRuntimeBits(mod)) { + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3415,13 +3417,13 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .register, .stack_offset, .memory => operand_mcv, .immediate => blk: { const new_mcv = try self.allocRegOrMem(block, true); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -4549,7 +4551,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; - const ty = self.air.typeOf(ref); + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. 
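    // (`typeOf` above is the file-local helper this patch adds at the bottom
    // of the file; it simply forwards to `self.air.typeOf(ref, mod.intern_pool)`,
    // since AIR type information is now looked up through the InternPool.)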
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; @@ -4654,7 +4656,7 @@ fn spillConditionFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -4678,7 +4680,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void assert(reg == reg_mcv.register); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { @@ -4726,7 +4728,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde return if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); + const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -4885,3 +4887,13 @@ fn wantSafety(self: *Self) bool { .ReleaseSmall => false, }; } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b592ffcb2a00..cd61eaf1fbdb 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -790,7 +790,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { const mod = func.bin_file.base.options.module.?; const val = func.air.value(ref, mod).?; - const ty = func.air.typeOf(ref); + const ty = func.typeOf(ref); if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; @@ -1260,7 +1260,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // we emit an unreachable instruction to tell the stack validator that part will never be reached. if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); - const last_inst_ty = func.air.typeOfIndex(inst); + const last_inst_ty = func.typeOfIndex(inst); if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) { try func.addTag(.@"unreachable"); } @@ -1541,7 +1541,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// if it is set, to ensure the stack alignment will be set correctly. 
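/// The pointer type is obtained through `func.typeOfIndex`, the file-local
/// wrapper this patch adds at the bottom of the file, which forwards to
/// `Air.typeOfIndex` together with the module's InternPool.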
fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { const mod = func.bin_file.base.options.module.?; - const ptr_ty = func.air.typeOfIndex(inst); + const ptr_ty = func.typeOfIndex(inst); const pointee_ty = ptr_ty.childType(); if (func.initial_stack_value == .none) { @@ -1834,6 +1834,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return switch (air_tags[inst]) { .constant => unreachable, .const_ty => unreachable, + .interned => unreachable, .add => func.airBinOp(inst, .add), .add_sat => func.airSatBinOp(inst, .add), @@ -2073,8 +2074,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; + const ip = &mod.intern_pool; + for (body) |inst| { - if (func.liveness.isUnused(inst) and !func.air.mustLower(inst)) { + if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip.*)) { continue; } const old_bookkeeping_value = func.air_bookkeeping; @@ -2134,8 +2138,8 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const child_type = func.air.typeOfIndex(inst).childType(); const mod = func.bin_file.base.options.module.?; + const child_type = func.typeOfIndex(inst).childType(); var result = result: { if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { @@ -2157,7 +2161,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ret_ty = func.air.typeOf(un_op).childType(); + const ret_ty = func.typeOf(un_op).childType(); const fn_info = func.decl.ty.fnInfo(); if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -2179,7 +2183,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const pl_op = func.air.instructions.items(.data)[inst].pl_op; const extra = func.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]); - const ty = func.air.typeOf(pl_op.operand); + const ty = func.typeOf(pl_op.operand); const mod = func.bin_file.base.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -2228,7 +2232,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif for (args) |arg| { const arg_val = try func.resolveInst(arg); - const arg_ty = func.air.typeOf(arg); + const arg_ty = func.typeOf(arg); if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); @@ -2296,7 +2300,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const ptr_info = ptr_ty.ptrInfo().data; const ty = ptr_ty.childType(); @@ -2449,7 +2453,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.air.getRefType(ty_op.ty); - const ptr_ty = func.air.typeOf(ty_op.operand); + const ptr_ty = func.typeOf(ty_op.operand); const ptr_info = ptr_ty.ptrInfo().data; if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, 
&.{ty_op.operand}); @@ -2522,11 +2526,11 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu } fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const arg_index = func.arg_index; const arg = func.args[arg_index]; const cc = func.decl.ty.fnInfo().cc; - const arg_ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const arg_ty = func.typeOfIndex(inst); if (cc == .C) { const arg_classes = abi.classifyType(arg_ty, mod); for (arg_classes) |class| { @@ -2572,8 +2576,8 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.air.typeOf(bin_op.lhs); - const rhs_ty = func.air.typeOf(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); // For certain operations, such as shifting, the types are different. // When converting this to a WebAssembly type, they *must* match to perform @@ -2770,7 +2774,7 @@ const FloatOp = enum { fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void { const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ty = func.air.typeOf(un_op); + const ty = func.typeOf(un_op); const result = try (try func.floatOp(op, ty, &.{operand})).toLocal(func, ty); func.finishAir(inst, result, &.{un_op}); @@ -2847,8 +2851,8 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.air.typeOf(bin_op.lhs); - const rhs_ty = func.air.typeOf(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement wrapping arithmetic for vectors", .{}); @@ -3387,7 +3391,7 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const operand_ty = func.air.typeOf(bin_op.lhs); + const operand_ty = func.typeOf(bin_op.lhs); const result = try (try func.cmp(lhs, rhs, operand_ty, op)).toLocal(func, Type.u32); // comparison result is always 32 bits func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } @@ -3488,7 +3492,7 @@ fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const block = func.blocks.get(br.block_inst).?; // if operand has codegen bits we should break with a value - if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) { + if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) { const operand = try func.resolveInst(br.operand); try func.lowerToStack(operand); @@ -3509,7 +3513,7 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.air.typeOf(ty_op.operand); + const operand_ty = func.typeOf(ty_op.operand); const mod = func.bin_file.base.options.module.?; const result = result: { @@ -3575,8 +3579,8 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const result = 
result: { const operand = try func.resolveInst(ty_op.operand); - const wanted_ty = func.air.typeOfIndex(inst); - const given_ty = func.air.typeOf(ty_op.operand); + const wanted_ty = func.typeOfIndex(inst); + const given_ty = func.typeOf(ty_op.operand); if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) { const bitcast_result = try func.bitcast(wanted_ty, given_ty, operand); break :result try bitcast_result.toLocal(func, wanted_ty); @@ -3609,7 +3613,7 @@ fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const extra = func.air.extraData(Air.StructField, ty_pl.payload); const struct_ptr = try func.resolveInst(extra.data.struct_operand); - const struct_ty = func.air.typeOf(extra.data.struct_operand).childType(); + const struct_ty = func.typeOf(extra.data.struct_operand).childType(); const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index); func.finishAir(inst, result, &.{extra.data.struct_operand}); } @@ -3617,7 +3621,7 @@ fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try func.resolveInst(ty_op.operand); - const struct_ty = func.air.typeOf(ty_op.operand).childType(); + const struct_ty = func.typeOf(ty_op.operand).childType(); const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index); func.finishAir(inst, result, &.{ty_op.operand}); @@ -3632,7 +3636,7 @@ fn structFieldPtr( index: u32, ) InnerError!WValue { const mod = func.bin_file.base.options.module.?; - const result_ty = func.air.typeOfIndex(inst); + const result_ty = func.typeOfIndex(inst); const offset = switch (struct_ty.containerLayout()) { .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { @@ -3663,7 +3667,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = func.air.typeOf(struct_field.struct_operand); + const struct_ty = func.typeOf(struct_field.struct_operand); const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); @@ -3762,7 +3766,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const blocktype = wasm.block_empty; const pl_op = func.air.instructions.items(.data)[inst].pl_op; const target = try func.resolveInst(pl_op.operand); - const target_ty = func.air.typeOf(pl_op.operand); + const target_ty = func.typeOf(pl_op.operand); const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload); const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1); defer func.gpa.free(liveness.deaths); @@ -3940,7 +3944,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const err_union_ty = func.air.typeOf(un_op); + const err_union_ty = func.typeOf(un_op); const pl_ty = err_union_ty.errorUnionPayload(); const result = result: { @@ -3976,7 +3980,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const ty_op = 
func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); + const op_ty = func.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); @@ -4004,7 +4008,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); + const op_ty = func.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); @@ -4028,9 +4032,9 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const err_ty = func.air.typeOfIndex(inst); + const err_ty = func.typeOfIndex(inst); - const pl_ty = func.air.typeOf(ty_op.operand); + const pl_ty = func.typeOf(ty_op.operand); const result = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); @@ -4082,7 +4086,7 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.getRefType(ty_op.ty); const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.air.typeOf(ty_op.operand); + const operand_ty = func.typeOf(ty_op.operand); const mod = func.bin_file.base.options.module.?; if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) { return func.fail("todo Wasm intcast for vectors", .{}); @@ -4155,7 +4159,7 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const op_ty = func.air.typeOf(un_op); + const op_ty = func.typeOf(un_op); const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty; const is_null = try func.isNull(operand, optional_ty, opcode); const result = try is_null.toLocal(func, optional_ty); @@ -4196,8 +4200,8 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const opt_ty = func.air.typeOf(ty_op.operand); - const payload_ty = func.air.typeOfIndex(inst); + const opt_ty = func.typeOf(ty_op.operand); + const payload_ty = func.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.finishAir(inst, .none, &.{ty_op.operand}); } @@ -4219,7 +4223,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.air.typeOf(ty_op.operand).childType(); + const opt_ty = func.typeOf(ty_op.operand).childType(); const mod = func.bin_file.base.options.module.?; const result = result: { @@ -4238,7 +4242,7 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = 
func.air.typeOf(ty_op.operand).childType(); + const opt_ty = func.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4263,7 +4267,7 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const payload_ty = func.air.typeOf(ty_op.operand); + const payload_ty = func.typeOf(ty_op.operand); const mod = func.bin_file.base.options.module.?; const result = result: { @@ -4276,7 +4280,7 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOfIndex(inst); + const op_ty = func.typeOfIndex(inst); if (op_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4304,7 +4308,7 @@ fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const slice_ty = func.air.typeOfIndex(inst); + const slice_ty = func.typeOfIndex(inst); const slice = try func.allocStack(slice_ty); try func.store(slice, lhs, Type.usize, 0); @@ -4323,7 +4327,7 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const slice_ty = func.air.typeOf(bin_op.lhs); + const slice_ty = func.typeOf(bin_op.lhs); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(); @@ -4395,7 +4399,7 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const wanted_ty = func.air.getRefType(ty_op.ty); - const op_ty = func.air.typeOf(ty_op.operand); + const op_ty = func.typeOf(ty_op.operand); const result = try func.trunc(operand, wanted_ty, op_ty); func.finishAir(inst, try result.toLocal(func, wanted_ty), &.{ty_op.operand}); @@ -4432,7 +4436,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const array_ty = func.air.typeOf(ty_op.operand).childType(); + const array_ty = func.typeOf(ty_op.operand).childType(); const slice_ty = func.air.getRefType(ty_op.ty); // create a slice on the stack @@ -4453,7 +4457,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ptr_ty = func.air.typeOf(un_op); + const ptr_ty = func.typeOf(un_op); const result = if (ptr_ty.isSlice()) try func.slicePtr(operand) else switch (operand) { @@ -4467,7 +4471,7 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = ptr_ty.childType(); @@ -4505,7 +4509,7 
@@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const elem_ty = func.air.getRefType(ty_pl.ty).childType(); const mod = func.bin_file.base.options.module.?; const elem_size = elem_ty.abiSize(mod); @@ -4538,7 +4542,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const offset = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const pointee_ty = switch (ptr_ty.ptrSize()) { .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), @@ -4568,7 +4572,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ptr = try func.resolveInst(bin_op.lhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const value = try func.resolveInst(bin_op.rhs); const len = switch (ptr_ty.ptrSize()) { .Slice => try func.sliceLen(ptr), @@ -4683,7 +4687,7 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const array_ty = func.air.typeOf(bin_op.lhs); + const array_ty = func.typeOf(bin_op.lhs); const array = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = array_ty.childType(); @@ -4750,12 +4754,12 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const dest_ty = func.air.typeOfIndex(inst); - const op_ty = func.air.typeOf(ty_op.operand); - const mod = func.bin_file.base.options.module.?; + const dest_ty = func.typeOfIndex(inst); + const op_ty = func.typeOf(ty_op.operand); if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{}); @@ -4775,12 +4779,12 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const dest_ty = func.air.typeOfIndex(inst); - const op_ty = func.air.typeOf(ty_op.operand); - const mod = func.bin_file.base.options.module.?; + const dest_ty = func.typeOfIndex(inst); + const op_ty = func.typeOf(ty_op.operand); if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{}); @@ -4804,7 +4808,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const elem_ty = ty.childType(); if 
(determineSimdStoreStrategy(ty, mod) == .direct) blk: { @@ -4881,7 +4885,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; - const inst_ty = func.air.typeOfIndex(inst); + const inst_ty = func.typeOfIndex(inst); const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -4894,7 +4898,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_size = child_ty.abiSize(mod); // TODO: One of them could be by ref; handle in loop - if (isByRef(func.air.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) { + if (isByRef(func.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { @@ -4951,11 +4955,11 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; - const result_ty = func.air.typeOfIndex(inst); + const result_ty = func.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]); - const mod = func.bin_file.base.options.module.?; const result: WValue = result_value: { switch (result_ty.zigTypeTag(mod)) { @@ -5085,7 +5089,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data; const result = result: { - const union_ty = func.air.typeOfIndex(inst); + const union_ty = func.typeOfIndex(inst); const layout = union_ty.unionGetLayout(mod); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = union_obj.fields.values()[extra.field_index]; @@ -5164,7 +5168,7 @@ fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pl_op = func.air.instructions.items(.data)[inst].pl_op; - const result = try func.allocLocal(func.air.typeOfIndex(inst)); + const result = try func.allocLocal(func.typeOfIndex(inst)); try func.addLabel(.memory_size, pl_op.payload); try func.addLabel(.local_set, result.local.value); func.finishAir(inst, result, &.{pl_op.operand}); @@ -5174,7 +5178,7 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void { const pl_op = func.air.instructions.items(.data)[inst].pl_op; const operand = try func.resolveInst(pl_op.operand); - const result = try func.allocLocal(func.air.typeOfIndex(inst)); + const result = try func.allocLocal(func.typeOfIndex(inst)); try func.emitWValue(operand); try func.addLabel(.memory_grow, pl_op.payload); try func.addLabel(.local_set, result.local.value); @@ -5263,8 +5267,8 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const un_ty = func.air.typeOf(bin_op.lhs).childType(); - const tag_ty = func.air.typeOf(bin_op.rhs); + const un_ty = func.typeOf(bin_op.lhs).childType(); + const tag_ty = func.typeOf(bin_op.rhs); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, 
bin_op.rhs }); @@ -5288,8 +5292,8 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const un_ty = func.air.typeOf(ty_op.operand); - const tag_ty = func.air.typeOfIndex(inst); + const un_ty = func.typeOf(ty_op.operand); + const tag_ty = func.typeOfIndex(inst); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand}); @@ -5307,9 +5311,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const dest_ty = func.air.typeOfIndex(inst); + const dest_ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - const extended = try func.fpext(operand, func.air.typeOf(ty_op.operand), dest_ty); + const extended = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty); const result = try extended.toLocal(func, dest_ty); func.finishAir(inst, result, &.{ty_op.operand}); } @@ -5352,9 +5356,9 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const dest_ty = func.air.typeOfIndex(inst); + const dest_ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - const truncated = try func.fptrunc(operand, func.air.typeOf(ty_op.operand), dest_ty); + const truncated = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty); const result = try truncated.toLocal(func, dest_ty); func.finishAir(inst, result, &.{ty_op.operand}); } @@ -5393,7 +5397,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const err_set_ty = func.air.typeOf(ty_op.operand).childType(); + const err_set_ty = func.typeOf(ty_op.operand).childType(); const payload_ty = err_set_ty.errorUnionPayload(); const operand = try func.resolveInst(ty_op.operand); @@ -5448,10 +5452,10 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const dst = try func.resolveInst(bin_op.lhs); - const dst_ty = func.air.typeOf(bin_op.lhs); + const dst_ty = func.typeOf(bin_op.lhs); const ptr_elem_ty = dst_ty.childType(); const src = try func.resolveInst(bin_op.rhs); - const src_ty = func.air.typeOf(bin_op.rhs); + const src_ty = func.typeOf(bin_op.rhs); const len = switch (dst_ty.ptrSize()) { .Slice => blk: { const slice_len = try func.sliceLen(dst); @@ -5485,12 +5489,12 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const op_ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); if (op_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement @popCount for vectors", .{}); @@ -5585,7 
+5589,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const lhs_op = try func.resolveInst(extra.lhs); const rhs_op = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); + const lhs_ty = func.typeOf(extra.lhs); const mod = func.bin_file.base.options.module.?; if (lhs_ty.zigTypeTag(mod) == .Vector) { @@ -5599,7 +5603,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro }; if (wasm_bits == 128) { - const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.air.typeOfIndex(inst), op); + const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.typeOfIndex(inst), op); return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); } @@ -5649,7 +5653,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro var overflow_local = try overflow_bit.toLocal(func, Type.u32); defer overflow_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); @@ -5729,8 +5733,8 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); - const rhs_ty = func.air.typeOf(extra.rhs); + const lhs_ty = func.typeOf(extra.lhs); + const rhs_ty = func.typeOf(extra.rhs); if (lhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); @@ -5771,7 +5775,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1)); defer overflow_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); @@ -5785,7 +5789,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); + const lhs_ty = func.typeOf(extra.lhs); const mod = func.bin_file.base.options.module.?; if (lhs_ty.zigTypeTag(mod) == .Vector) { @@ -5946,7 +5950,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var bin_op_local = try bin_op.toLocal(func, lhs_ty); defer bin_op_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, bin_op_local, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset); @@ -5955,10 +5959,10 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const ty = 
func.typeOfIndex(inst); if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{}); } @@ -5986,11 +5990,11 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE } fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const pl_op = func.air.instructions.items(.data)[inst].pl_op; const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data; - const ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const ty = func.typeOfIndex(inst); if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@mulAdd` for vectors", .{}); } @@ -6020,11 +6024,11 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@clz` for vectors", .{}); } @@ -6073,12 +6077,12 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); + const ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@ctz` for vectors", .{}); } @@ -6141,7 +6145,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void { if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{}); const pl_op = func.air.instructions.items(.data)[inst].pl_op; - const ty = func.air.typeOf(pl_op.operand); + const ty = func.typeOf(pl_op.operand); const operand = try func.resolveInst(pl_op.operand); log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand }); @@ -6179,7 +6183,7 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const err_union = try func.resolveInst(pl_op.operand); const extra = func.air.extraData(Air.Try, pl_op.payload); const body = func.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = func.air.typeOf(pl_op.operand); + const err_union_ty = func.typeOf(pl_op.operand); const result = try lowerTry(func, inst, err_union, body, err_union_ty, false); func.finishAir(inst, result, &.{pl_op.operand}); } @@ -6189,7 +6193,7 @@ fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const extra = func.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try func.resolveInst(extra.data.ptr); const body = func.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = func.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = func.typeOf(extra.data.ptr).childType(); const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true); func.finishAir(inst, result, &.{extra.data.ptr}); } @@ -6251,11 +6255,11 @@ fn lowerTry( } fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op 
= func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - const mod = func.bin_file.base.options.module.?; if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: @byteSwap for vectors", .{}); @@ -6325,7 +6329,7 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -6340,7 +6344,7 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -6361,7 +6365,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const mod = func.bin_file.base.options.module.?; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -6512,7 +6516,7 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const mod = func.bin_file.base.options.module.?; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -6626,7 +6630,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const mod = func.bin_file.base.options.module.?; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { @@ -6785,11 +6789,11 @@ fn callIntrinsic( fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const enum_ty = func.air.typeOf(un_op); + const enum_ty = func.typeOf(un_op); const func_sym_index = try func.getTagNameFunction(enum_ty); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.lowerToStack(result_ptr); try func.emitWValue(operand); try func.addLabel(.call, func_sym_index); @@ -7061,9 +7065,9 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = func.air.typeOf(extra.ptr); + const ptr_ty = func.typeOf(extra.ptr); const ty = ptr_ty.childType(); - const result_ty = func.air.typeOfIndex(inst); + const result_ty = func.typeOfIndex(inst); const ptr_operand = try func.resolveInst(extra.ptr); const expected_val = try func.resolveInst(extra.expected_value); @@ -7133,7 +7137,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const atomic_load = 
func.air.instructions.items(.data)[inst].atomic_load; const ptr = try func.resolveInst(atomic_load.ptr); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); if (func.useAtomicFeature()) { const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { @@ -7163,7 +7167,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr = try func.resolveInst(pl_op.operand); const operand = try func.resolveInst(extra.operand); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const op: std.builtin.AtomicRmwOp = extra.op(); if (func.useAtomicFeature()) { @@ -7348,7 +7352,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const operand = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const ty = ptr_ty.childType(); if (func.useAtomicFeature()) { @@ -7380,3 +7384,13 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try WValue.toLocal(.stack, func, Type.usize); return func.finishAir(inst, result, &.{}); } + +fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type { + const mod = func.bin_file.base.options.module.?; + return func.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type { + const mod = func.bin_file.base.options.module.?; + return func.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 826bca226678..865ebe02f75e 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -447,7 +447,7 @@ const InstTracking = struct { else => unreachable, } tracking_log.debug("spill %{d} from {} to {}", .{ inst, self.short, self.long }); - try function.genCopy(function.air.typeOfIndex(inst), self.long, self.short); + try function.genCopy(function.typeOfIndex(inst), self.long, self.short); } fn reuseFrame(self: *InstTracking) void { @@ -537,7 +537,7 @@ const InstTracking = struct { inst: Air.Inst.Index, target: InstTracking, ) !void { - const ty = function.air.typeOfIndex(inst); + const ty = function.typeOfIndex(inst); if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame) try function.genCopy(ty, target.long, self.short); try function.genCopy(ty, target.short, self.short); @@ -1725,6 +1725,8 @@ fn gen(self: *Self) InnerError!void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { @@ -1733,7 +1735,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; wip_mir_log.debug("{}", .{self.fmtAir(inst)}); verbose_tracking_log.debug("{}", .{self.fmtTracking()}); @@ -1919,6 +1921,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -2255,7 +2258,7 @@ fn 
allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { const mod = self.bin_file.options.module.?; - const ptr_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOfIndex(inst); const val_ty = ptr_ty.childType(); return self.allocFrameIndex(FrameAlloc.init(.{ .size = math.cast(u32, val_ty.abiSize(mod)) orelse { @@ -2266,7 +2269,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - return self.allocRegOrMemAdvanced(self.air.typeOfIndex(inst), inst, reg_ok); + return self.allocRegOrMemAdvanced(self.typeOfIndex(inst), inst, reg_ok); } fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { @@ -2485,7 +2488,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .load_frame => .{ .register_offset = .{ .reg = (try self.copyToRegisterWithInstTracking( inst, - self.air.typeOfIndex(inst), + self.typeOfIndex(inst), self.ret_mcv.long, )).register, .off = self.ret_mcv.short.indirect.off, @@ -2496,9 +2499,9 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2562,9 +2565,9 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { fn airFpext(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2625,10 +2628,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_int_info = src_ty.intInfo(mod); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_int_info = dst_ty.intInfo(mod); const abi_size = @intCast(u32, dst_ty.abiSize(mod)); @@ -2707,9 +2710,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const result = result: { @@ -2818,7 +2821,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const operand = try 
self.resolveInst(un_op); const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand)) @@ -2834,11 +2837,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const slice_ty = self.air.typeOfIndex(inst); + const slice_ty = self.typeOfIndex(inst); const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); @@ -2877,7 +2880,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { const air_tag = self.air.instructions.items(.tag); const air_data = self.air.instructions.items(.data); - const dst_ty = self.air.typeOf(dst_air); + const dst_ty = self.typeOf(dst_air); const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { @@ -2889,7 +2892,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { @boolToInt(src_int.positive and dst_info.signedness == .signed); }, .intcast => { - const src_ty = self.air.typeOf(air_data[inst].ty_op.operand); + const src_ty = self.typeOf(air_data[inst].ty_op.operand); const src_info = src_ty.intInfo(mod); return @min(switch (src_info.signedness) { .signed => switch (dst_info.signedness) { @@ -2913,7 +2916,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result = result: { const tag = self.air.instructions.items(.tag)[inst]; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); switch (dst_ty.zigTypeTag(mod)) { .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs), else => {}, @@ -2942,7 +2945,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) @@ -3021,7 +3024,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) @@ -3093,7 +3096,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); try self.spillRegisters(&.{ .rax, .rdx }); const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); @@ -3151,7 +3154,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const 
result: MCValue = result: { const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); switch (ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}), .Int => { @@ -3168,7 +3171,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .signed => .o, }; - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { switch (partial_mcv) { .register => |reg| { @@ -3211,8 +3214,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = result: { - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}), .Int => { @@ -3241,7 +3244,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, tmp_mcv, lhs); const cc = Condition.ne; - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { switch (partial_mcv) { .register => |reg| { @@ -3349,7 +3352,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const dst_ty = self.air.typeOf(bin_op.lhs); + const dst_ty = self.typeOf(bin_op.lhs); const result: MCValue = switch (dst_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), .Int => result: { @@ -3370,7 +3373,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const extra_bits = if (dst_info.bits <= 64) self.regExtraBits(dst_ty) else @@ -3525,8 +3528,8 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { try self.register_manager.getReg(.rcx, null); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result = try self.genShiftBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); @@ -3543,7 +3546,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const pl_ty = self.air.typeOfIndex(inst); + const pl_ty = self.typeOfIndex(inst); const opt_mcv = try self.resolveInst(ty_op.operand); if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) { @@ -3568,7 +3571,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = 
self.typeOfIndex(inst); const opt_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) @@ -3582,8 +3585,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const opt_ty = src_ty.childType(); const src_mcv = try self.resolveInst(ty_op.operand); @@ -3615,7 +3618,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_union_ty = self.air.typeOf(ty_op.operand); + const err_union_ty = self.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(); const payload_ty = err_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); @@ -3662,7 +3665,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_union_ty = self.air.typeOf(ty_op.operand); + const err_union_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, operand); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -3720,7 +3723,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const src_reg = switch (src_mcv) { .register => |reg| reg, @@ -3756,7 +3759,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const src_reg = switch (src_mcv) { .register => |reg| reg, @@ -3765,7 +3768,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_reg else @@ -3791,7 +3794,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const src_reg = switch (src_mcv) { .register => |reg| reg, @@ -3816,7 +3819,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) break :result .unreach; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = 
self.typeOfIndex(inst); const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_reg else @@ -3856,10 +3859,10 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const pl_ty = self.air.typeOf(ty_op.operand); + const pl_ty = self.typeOf(ty_op.operand); if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 }; - const opt_ty = self.air.typeOfIndex(inst); + const opt_ty = self.typeOfIndex(inst); const pl_mcv = try self.resolveInst(ty_op.operand); const same_repr = opt_ty.optionalReprIsPayload(mod); if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv; @@ -3952,7 +3955,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); try self.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; @@ -3980,7 +3983,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const src_reg = switch (src_mcv) { .register => |reg| reg, @@ -3989,7 +3992,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_reg else @@ -4014,7 +4017,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const opt_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) @@ -4046,7 +4049,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { const mod = self.bin_file.options.module.?; - const slice_ty = self.air.typeOf(lhs); + const slice_ty = self.typeOf(lhs); const slice_mcv = try self.resolveInst(lhs); const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4059,7 +4062,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); - const index_ty = self.air.typeOf(rhs); + const index_ty = self.typeOf(rhs); const index_mcv = try self.resolveInst(rhs); const index_mcv_lock: ?RegisterLock = switch (index_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4083,7 +4086,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = 
self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -4105,7 +4108,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const array_ty = self.air.typeOf(bin_op.lhs); + const array_ty = self.typeOf(bin_op.lhs); const array = try self.resolveInst(bin_op.lhs); const array_lock: ?RegisterLock = switch (array) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4116,7 +4119,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = array_ty.childType(); const elem_abi_size = elem_ty.abiSize(mod); - const index_ty = self.air.typeOf(bin_op.rhs); + const index_ty = self.typeOf(bin_op.rhs); const index = try self.resolveInst(bin_op.rhs); const index_lock: ?RegisterLock = switch (index) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4170,14 +4173,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); // this is identical to the `airPtrElemPtr` codegen except here an // additional `mov` is needed at the end to get the actual value const elem_ty = ptr_ty.elemType2(mod); const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); - const index_ty = self.air.typeOf(bin_op.rhs); + const index_ty = self.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_lock = switch (index_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4218,7 +4221,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(extra.lhs); + const ptr_ty = self.typeOf(extra.lhs); const ptr = try self.resolveInst(extra.lhs); const ptr_lock: ?RegisterLock = switch (ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4228,7 +4231,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = ptr_ty.elemType2(mod); const elem_abi_size = elem_ty.abiSize(mod); - const index_ty = self.air.typeOf(extra.rhs); + const index_ty = self.typeOf(extra.rhs); const index = try self.resolveInst(extra.rhs); const index_lock: ?RegisterLock = switch (index) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4249,9 +4252,9 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_union_ty = self.air.typeOf(bin_op.lhs); + const ptr_union_ty = self.typeOf(bin_op.lhs); const union_ty = ptr_union_ty.childType(); - const tag_ty = self.air.typeOf(bin_op.rhs); + const tag_ty = self.typeOf(bin_op.rhs); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { @@ -4296,8 +4299,8 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const tag_ty = self.air.typeOfIndex(inst); - const
union_ty = self.air.typeOf(ty_op.operand); + const tag_ty = self.typeOfIndex(inst); + const union_ty = self.typeOf(ty_op.operand); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { @@ -4350,8 +4353,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const mat_src_mcv = switch (src_mcv) { @@ -4479,8 +4482,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.bitSize(mod); const src_mcv = try self.resolveInst(ty_op.operand); @@ -4575,7 +4578,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); @@ -4745,7 +4748,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true); @@ -4766,7 +4769,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); @@ -4876,12 +4879,12 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const tag = self.air.instructions.items(.tag)[inst]; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const abi_size: u32 = switch (ty.abiSize(mod)) { 1...16 => 16, 17...32 => 32, else => return self.fail("TODO implement airFloatSign for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), }; const scalar_bits = ty.scalarType(mod).floatBits(self.target.*); @@ -5005,7 +5008,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) @@ -5093,7 +5096,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { const mod = 
self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const abi_size = @intCast(u32, ty.abiSize(mod)); const src_mcv = try self.resolveInst(un_op); @@ -5399,7 +5402,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); + const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; @@ -5407,7 +5410,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx }); defer for (reg_locks) |lock| self.register_manager.unlockReg(lock); - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const elem_size = elem_ty.abiSize(mod); const elem_rc = regClassForType(elem_ty, mod); @@ -5548,7 +5551,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { } const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_mcv = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const src_mcv = try self.resolveInst(bin_op.rhs); if (ptr_ty.ptrInfo().data.host_size > 0) { try self.packedStore(ptr_ty, ptr_mcv, src_mcv); @@ -5573,8 +5576,8 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { const mod = self.bin_file.options.module.?; - const ptr_field_ty = self.air.typeOfIndex(inst); - const ptr_container_ty = self.air.typeOf(operand); + const ptr_field_ty = self.typeOfIndex(inst); + const ptr_container_ty = self.typeOf(operand); const container_ty = ptr_container_ty.childType(); const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), @@ -5602,7 +5605,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; - const container_ty = self.air.typeOf(operand); + const container_ty = self.typeOf(operand); const container_rc = regClassForType(container_ty, mod); const field_ty = container_ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; @@ -5756,7 +5759,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const parent_ty = inst_ty.childType(); const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); @@ -5772,7 +5775,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue { const mod = self.bin_file.options.module.?; - const src_ty = self.air.typeOf(src_air); + const src_ty = self.typeOf(src_air); const src_mcv = try self.resolveInst(src_air); if (src_ty.zigTypeTag(mod) == .Vector) { return self.fail("TODO implement genUnOp for {}", 
.{src_ty.fmt(self.bin_file.options.module.?)}); @@ -6358,8 +6361,8 @@ fn genBinOp( rhs_air: Air.Inst.Ref, ) !MCValue { const mod = self.bin_file.options.module.?; - const lhs_ty = self.air.typeOf(lhs_air); - const rhs_ty = self.air.typeOf(rhs_air); + const lhs_ty = self.typeOf(lhs_air); + const rhs_ty = self.typeOf(rhs_air); const abi_size = @intCast(u32, lhs_ty.abiSize(mod)); const maybe_mask_reg = switch (air_tag) { @@ -7918,6 +7921,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M } fn airArg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; // skip zero-bit arguments as they don't have a corresponding arg instruction var arg_index = self.arg_index; while (self.args[arg_index] == .none) arg_index += 1; @@ -7931,9 +7935,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { else => return self.fail("TODO implement arg for {}", .{dst_mcv}), } - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const name = self.owner.mod_fn.getParamName(self.bin_file.options.module.?, src_index); + const name = self.owner.mod_fn.getParamName(mod, src_index); try self.genArgDbgInfo(ty, name, dst_mcv); break :result dst_mcv; @@ -8050,7 +8054,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, @@ -8085,7 +8089,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier else => unreachable, } for (args, info.args) |arg, mc_arg| { - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(arg); switch (mc_arg) { .none => {}, @@ -8112,7 +8116,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier defer if (ret_lock) |lock| self.register_manager.unlockReg(lock); for (args, info.args) |arg, mc_arg| { - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(arg); switch (mc_arg) { .none, .load_frame => {}, @@ -8241,7 +8245,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); switch (self.ret_mcv.short) { .none => {}, .register => try self.load(self.ret_mcv.short, ptr_ty, ptr), @@ -8258,7 +8262,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); try self.spillEflagsIfOccupied(); self.eflags_inst = inst; @@ -8476,7 +8480,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - const op_ty = self.air.typeOf(un_op); + const op_ty = self.typeOf(un_op); const op_abi_size = @intCast(u32, op_ty.abiSize(mod)); const op_mcv = try self.resolveInst(un_op); const 
dst_reg = switch (op_mcv) { @@ -8496,7 +8500,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); + const err_union_ty = self.typeOf(pl_op.operand); const result = try self.genTry(inst, pl_op.operand, body, err_union_ty, false); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8505,7 +8509,7 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(); const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8584,7 +8588,7 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -8626,7 +8630,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); - const cond_ty = self.air.typeOf(pl_op.operand); + const cond_ty = self.typeOf(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; @@ -8871,7 +8875,7 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCVa fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNull(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8879,7 +8883,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNullPtr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8887,7 +8891,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = switch (try self.isNull(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, @@ -8898,7 +8902,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = 
self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = switch (try self.isNullPtr(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, @@ -8909,7 +8913,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isErr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8932,7 +8936,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); const result = try self.isErr(inst, ptr_ty.childType(), operand); @@ -8943,7 +8947,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNonErr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8966,7 +8970,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); const result = try self.isNonErr(inst, ptr_ty.childType(), operand); @@ -9032,7 +9036,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const condition = try self.resolveInst(pl_op.operand); - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); var extra_index: usize = switch_br.end; var case_i: u32 = 0; @@ -9119,7 +9123,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const src_mcv = try self.resolveInst(br.operand); - const block_ty = self.air.typeOfIndex(br.block_inst); + const block_ty = self.typeOfIndex(br.block_inst); const block_unused = !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; @@ -9244,7 +9248,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(reg, self.air.typeOf(input), arg_mcv); + try self.genSetReg(reg, self.typeOf(input), arg_mcv); } { @@ -10169,7 +10173,7 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); try self.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; @@ -10179,8 +10183,8 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const mod = 
self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const result = result: { const dst_rc = regClassForType(dst_ty, mod); @@ -10241,8 +10245,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const slice_ty = self.air.typeOfIndex(inst); - const ptr_ty = self.air.typeOf(ty_op.operand); + const slice_ty = self.typeOfIndex(inst); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(); const array_len = array_ty.arrayLen(); @@ -10264,11 +10268,11 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = @intCast(u32, src_ty.bitSize(mod)); const src_signedness = if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const src_size = math.divCeil(u32, @max(switch (src_signedness) { .signed => src_bits, @@ -10318,8 +10322,8 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); - const dst_ty = self.air.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); const dst_signedness = if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; @@ -10371,8 +10375,8 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(extra.ptr); - const val_ty = self.air.typeOf(extra.expected_value); + const ptr_ty = self.typeOf(extra.ptr); + const val_ty = self.typeOf(extra.expected_value); const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); @@ -10712,10 +10716,10 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { const unused = self.liveness.isUnused(inst); - const ptr_ty = self.air.typeOf(pl_op.operand); + const ptr_ty = self.typeOf(pl_op.operand); const ptr_mcv = try self.resolveInst(pl_op.operand); - const val_ty = self.air.typeOf(extra.operand); + const val_ty = self.typeOf(extra.operand); const val_mcv = try self.resolveInst(extra.operand); const result = @@ -10726,7 +10730,7 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; - const ptr_ty = self.air.typeOf(atomic_load.ptr); + const ptr_ty = self.typeOf(atomic_load.ptr); const ptr_mcv = try self.resolveInst(atomic_load.ptr); const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -10747,10 +10751,10 @@ fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: 
std.builtin.AtomicOrder) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr_mcv = try self.resolveInst(bin_op.lhs); - const val_ty = self.air.typeOf(bin_op.rhs); + const val_ty = self.typeOf(bin_op.rhs); const val_mcv = try self.resolveInst(bin_op.rhs); const result = try self.atomicOp(ptr_mcv, val_mcv, ptr_ty, val_ty, true, null, order); @@ -10768,7 +10772,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); - const dst_ptr_ty = self.air.typeOf(bin_op.lhs); + const dst_ptr_ty = self.typeOf(bin_op.lhs); const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10776,7 +10780,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock); const src_val = try self.resolveInst(bin_op.rhs); - const elem_ty = self.air.typeOf(bin_op.rhs); + const elem_ty = self.typeOf(bin_op.rhs); const src_val_lock: ?RegisterLock = switch (src_val) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10888,7 +10892,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); - const dst_ptr_ty = self.air.typeOf(bin_op.lhs); + const dst_ptr_ty = self.typeOf(bin_op.lhs); const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10922,8 +10926,8 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const inst_ty = self.air.typeOfIndex(inst); - const enum_ty = self.air.typeOf(un_op); + const inst_ty = self.typeOfIndex(inst); + const enum_ty = self.typeOf(un_op); // We need a properly aligned and sized call frame to be able to call this function. 
{ @@ -10964,7 +10968,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const err_ty = self.air.typeOf(un_op); + const err_ty = self.typeOf(un_op); const err_mcv = try self.resolveInst(un_op); const err_reg = try self.copyToTmpRegister(err_ty, err_mcv); const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); @@ -11046,7 +11050,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { fn airSplat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const dst_rc = regClassForType(vector_ty, mod); const scalar_ty = vector_ty.scalarType(mod); @@ -11266,7 +11270,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); @@ -11411,10 +11415,10 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { - const union_ty = self.air.typeOfIndex(inst); + const union_ty = self.typeOfIndex(inst); const layout = union_ty.unionGetLayout(mod); - const src_ty = self.air.typeOf(extra.init); + const src_ty = self.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); if (layout.tag_size == 0) { if (self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv; @@ -11461,7 +11465,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); if (!self.hasFeature(.fma)) return self.fail("TODO implement airMulAdd for {}", .{ ty.fmt(self.bin_file.options.module.?), @@ -11609,7 +11613,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; - const ty = self.air.typeOf(ref); + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; @@ -11713,7 +11717,7 @@ fn resolveCallingConventionValues( defer self.gpa.free(param_types); fn_ty.fnParamTypes(param_types); // TODO: promote var arg types - for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.air.typeOf(arg); + for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.typeOf(arg); var result: CallMCValues = .{ .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. 
@@ -12023,3 +12027,13 @@ fn hasAnyFeatures(self: *Self, features: anytype) bool { fn hasAllFeatures(self: *Self, features: anytype) bool { return Target.x86.featureSetHasAll(self.target.cpu.features, features); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/codegen/c.zig b/src/codegen/c.zig index da040a6fbbcd..a87f37b1c9d8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -288,7 +288,7 @@ pub const Function = struct { const mod = f.object.dg.module; const val = f.air.value(ref, mod).?; - const ty = f.air.typeOf(ref); + const ty = f.typeOf(ref); const result: CValue = if (lowersToArray(ty, mod)) result: { const writer = f.object.code_header.writer(); @@ -355,7 +355,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; return f.object.dg.renderValue(w, ty, val, location); }, @@ -368,7 +368,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); @@ -382,7 +382,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); @@ -396,7 +396,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); @@ -486,6 +486,16 @@ pub const Function = struct { f.object.dg.ctypes.deinit(gpa); f.object.dg.fwd_decl.deinit(); } + + fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { + const mod = f.object.dg.module; + return f.air.typeOf(inst, mod.intern_pool); + } + + fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { + const mod = f.object.dg.module; + return f.air.typeOfIndex(inst, mod.intern_pool); + } }; /// This data is available when outputting .c code for a `Module`. 
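Both backends gain the same pair of private wrappers: `typeOf`/`typeOfIndex` fetch the `Module` from the emitter and forward `mod.intern_pool` into `Air.typeOf`/`Air.typeOfIndex`, so every call site shrinks from `self.air.typeOf(...)` to `self.typeOf(...)`. Below is a minimal, self-contained sketch of this pattern; the `InternPool`, `Module`, `Air`, and `Backend` stand-ins are hypothetical reductions, assuming only what the helper bodies above show, namely that `Air.typeOf` now requires the pool as an extra argument.

const std = @import("std");

const InternPool = struct {
    // Reduced to a name table so the sketch stays runnable.
    names: []const []const u8,
};

const Module = struct {
    intern_pool: InternPool,
};

const Air = struct {
    // Stand-in for Air.typeOf: resolving a reference now needs the InternPool.
    fn typeOf(air: Air, ref: usize, ip: InternPool) []const u8 {
        _ = air;
        return ip.names[ref];
    }
};

const Backend = struct {
    air: Air,
    module: *Module,

    // The wrapper keeps the InternPool plumbing in one place; call sites
    // write `self.typeOf(ref)` instead of threading the pool everywhere.
    fn typeOf(self: *Backend, ref: usize) []const u8 {
        return self.air.typeOf(ref, self.module.intern_pool);
    }
};

pub fn main() void {
    var module = Module{ .intern_pool = .{ .names = &[_][]const u8{ "u32", "bool" } } };
    var backend = Backend{ .air = .{}, .module = &module };
    std.debug.print("{s}\n", .{backend.typeOf(1)}); // prints "bool"
}

Since the pool hangs off the Module, the wrapper also avoids repeating `self.bin_file.options.module.?` (or `f.object.dg.module` in the C backend) at every type query, which keeps the call-site churn in the hunks above purely mechanical.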
@@ -2802,17 +2812,19 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const air_tags = f.air.instructions.items(.tag); for (body) |inst| { - if (f.liveness.isUnused(inst) and !f.air.mustLower(inst)) { + if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip.*)) continue; - } const result_value = switch (air_tags[inst]) { // zig fmt: off .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies + .arg => try airArg(f, inst), .trap => try airTrap(f.object.writer()), @@ -2837,7 +2849,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none), .rem => blk: { const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(mod); + const lhs_scalar_ty = f.typeOf(bin_op.lhs).scalarType(mod); // For binary operations @TypeOf(lhs)==@TypeOf(rhs), // so we only check one. break :blk if (lhs_scalar_ty.isInt(mod)) @@ -3088,7 +3100,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: []const u8) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -3107,7 +3119,7 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3136,8 +3148,8 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const ptr_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const ptr_ty = f.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); @@ -3169,7 +3181,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3198,8 +3210,8 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const slice_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const slice_ty = f.typeOf(bin_op.lhs); const elem_ty = slice_ty.elemType2(mod); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); @@ -3226,7 
+3238,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; @@ -3251,7 +3263,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const elem_type = inst_ty.elemType(); if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; @@ -3267,7 +3279,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.elemType(); if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; @@ -3282,7 +3294,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_cty = try f.typeToIndex(inst_ty, .parameter); const i = f.next_arg_index; @@ -3309,7 +3321,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const ptr_ty = f.air.typeOf(ty_op.operand); + const ptr_ty = f.typeOf(ty_op.operand); const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const src_ty = ptr_info.pointee_type; @@ -3399,7 +3411,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); const op_inst = Air.refToIndex(un_op); - const op_ty = f.air.typeOf(un_op); + const op_ty = f.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType() else op_ty; var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); @@ -3453,9 +3465,9 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); @@ -3478,13 +3490,13 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const dest_int_info = inst_scalar_ty.intInfo(mod); const dest_bits = dest_int_info.bits; const dest_c_bits = toCIntBits(dest_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); const scalar_int_info = scalar_ty.intInfo(mod); @@ 
-3572,7 +3584,7 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); @@ -3587,12 +3599,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { // *a = b; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = f.air.typeOf(bin_op.lhs); + const ptr_ty = f.typeOf(bin_op.lhs); const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const ptr_val = try f.resolveInst(bin_op.lhs); - const src_ty = f.air.typeOf(bin_op.rhs); + const src_ty = f.typeOf(bin_op.rhs); const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; @@ -3737,8 +3749,8 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); - const operand_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const operand_ty = f.typeOf(bin_op.lhs); const scalar_ty = operand_ty.scalarType(mod); const w = f.object.writer(); @@ -3769,14 +3781,14 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits); const op = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3802,7 +3814,7 @@ fn airBinOp( ) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); + const operand_ty = f.typeOf(bin_op.lhs); const scalar_ty = operand_ty.scalarType(mod); if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); @@ -3811,7 +3823,7 @@ fn airBinOp( const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3839,7 +3851,7 @@ fn airCmpOp( operator: std.math.CompareOperator, ) !CValue { const mod = f.object.dg.module; - const lhs_ty = f.air.typeOf(data.lhs); + const lhs_ty = f.typeOf(data.lhs); const scalar_ty = lhs_ty.scalarType(mod); const scalar_bits = scalar_ty.bitSize(mod); @@ -3855,12 +3867,12 @@ fn airCmpOp( if (scalar_ty.isRuntimeFloat()) return airCmpBuiltinCall(f, inst, data, operator, .operator, .none); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); - const rhs_ty = f.air.typeOf(data.rhs); + const rhs_ty = f.typeOf(data.rhs); const 
need_cast = lhs_ty.isSinglePointer() or rhs_ty.isSinglePointer(); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3891,7 +3903,7 @@ fn airEquality( const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); + const operand_ty = f.typeOf(bin_op.lhs); const operand_bits = operand_ty.bitSize(mod); if (operand_ty.isInt(mod) and operand_bits > 64) return airCmpBuiltinCall( @@ -3910,7 +3922,7 @@ fn airEquality( try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); @@ -3954,7 +3966,7 @@ fn airEquality( fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -3976,7 +3988,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const elem_ty = inst_scalar_ty.elemType2(mod); @@ -4019,7 +4031,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64) @@ -4065,7 +4077,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { const len = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = inst_ty.slicePtrFieldType(&buf); @@ -4110,7 +4122,7 @@ fn airCall( const resolved_args = try gpa.alloc(CValue, args.len); defer gpa.free(resolved_args); for (resolved_args, args) |*resolved_arg, arg| { - const arg_ty = f.air.typeOf(arg); + const arg_ty = f.typeOf(arg); const arg_cty = try f.typeToIndex(arg_ty, .parameter); if (f.indexToCType(arg_cty).tag() == .void) { resolved_arg.* = .none; @@ -4141,7 +4153,7 @@ fn airCall( for (args) |arg| try bt.feed(arg); } - const callee_ty = f.air.typeOf(pl_op.operand); + const callee_ty = f.typeOf(pl_op.operand); const fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => callee_ty.childType(), @@ -4279,7 +4291,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { f.next_block_index += 1; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else @@ -4302,7 +4314,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.indent_writer.insertNewline(); // noreturn blocks have no `br` instructions reaching them, so we don't want a label - if (!f.air.typeOfIndex(inst).isNoReturn()) { + if (!f.typeOfIndex(inst).isNoReturn()) { // label must be followed by an expression, include 
an empty one. try writer.print("zig_block_{d}:;\n", .{block_id}); } @@ -4314,7 +4326,7 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.Try, pl_op.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.air.typeOf(pl_op.operand); + const err_union_ty = f.typeOf(pl_op.operand); return lowerTry(f, inst, pl_op.operand, body, err_union_ty, false); } @@ -4322,7 +4334,7 @@ fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.TryPtr, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = f.typeOf(extra.data.ptr).childType(); return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true); } @@ -4336,7 +4348,7 @@ fn lowerTry( ) !CValue { const mod = f.object.dg.module; const err_union = try f.resolveInst(operand); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(); @@ -4404,7 +4416,7 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { // If result is .none then the value of the block is unused. if (result != .none) { - const operand_ty = f.air.typeOf(branch.operand); + const operand_ty = f.typeOf(branch.operand); const operand = try f.resolveInst(branch.operand); try reap(f, inst, &.{branch.operand}); @@ -4421,10 +4433,10 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const dest_ty = f.air.typeOfIndex(inst); + const dest_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const bitcasted = try bitcast(f, dest_ty, operand, operand_ty); try reap(f, inst, &.{ty_op.operand}); @@ -4684,7 +4696,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const condition = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); - const condition_ty = f.air.typeOf(pl_op.operand); + const condition_ty = f.typeOf(pl_op.operand); const switch_br = f.air.extraData(Air.SwitchBr, pl_op.payload); const writer = f.object.writer(); @@ -4784,7 +4796,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: { const local = try f.allocLocal(inst, inst_ty); if (f.wantSafety()) { @@ -4814,7 +4826,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { - const output_ty = if (output == .none) inst_ty else f.air.typeOf(output).childType(); + const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(); try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(output_ty, alignment); @@ -4847,7 +4859,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[0] == '{'; const input_val = try f.resolveInst(input); if 
(asmInputNeedsLocal(constraint, input_val)) { - const input_ty = f.air.typeOf(input); + const input_ty = f.typeOf(input); if (is_reg) try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(input_ty, alignment); @@ -5048,7 +5060,7 @@ fn airIsNull( try f.writeCValue(writer, operand, .Other); } - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty; var payload_buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&payload_buf); @@ -5083,7 +5095,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const opt_ty = f.air.typeOf(ty_op.operand); + const opt_ty = f.typeOf(ty_op.operand); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); @@ -5092,7 +5104,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { return .none; } - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -5119,9 +5131,9 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const ptr_ty = f.air.typeOf(ty_op.operand); + const ptr_ty = f.typeOf(ty_op.operand); const opt_ty = ptr_ty.childType(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) { return .{ .undef = inst_ty }; @@ -5149,11 +5161,11 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const opt_ty = operand_ty.elemType(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (opt_ty.optionalReprIsPayload(mod)) { if (f.liveness.isUnused(inst)) { @@ -5249,7 +5261,7 @@ fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue { const container_ptr_val = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); - const container_ptr_ty = f.air.typeOf(extra.struct_operand); + const container_ptr_ty = f.typeOf(extra.struct_operand); return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, extra.field_index); } @@ -5258,7 +5270,7 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue const container_ptr_val = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const container_ptr_ty = f.air.typeOf(ty_op.operand); + const container_ptr_ty = f.typeOf(ty_op.operand); return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, index); } @@ -5267,10 +5279,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const container_ptr_ty = f.air.typeOfIndex(inst); + const container_ptr_ty = f.typeOfIndex(inst); const container_ty = container_ptr_ty.childType(); - const field_ptr_ty = f.air.typeOf(extra.field_ptr); + const field_ptr_ty = f.typeOf(extra.field_ptr); const field_ptr_val = try 
f.resolveInst(extra.field_ptr); try reap(f, inst, &.{extra.field_ptr}); @@ -5334,7 +5346,7 @@ fn fieldPtr( ) !CValue { const mod = f.object.dg.module; const container_ty = container_ptr_ty.elemType(); - const field_ptr_ty = f.air.typeOfIndex(inst); + const field_ptr_ty = f.typeOfIndex(inst); // Ensure complete type definition is visible before accessing fields. _ = try f.typeToIndex(container_ty, .complete); @@ -5385,7 +5397,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{extra.struct_operand}); return .none; @@ -5393,7 +5405,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const struct_byval = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); - const struct_ty = f.air.typeOf(extra.struct_operand); + const struct_ty = f.typeOf(extra.struct_operand); const writer = f.object.writer(); // Ensure complete type definition is visible before accessing fields. @@ -5514,9 +5526,9 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; @@ -5553,10 +5565,10 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; const writer = f.object.writer(); @@ -5589,9 +5601,9 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const repr_is_payload = inst_ty.optionalReprIsPayload(mod); - const payload_ty = f.air.typeOf(ty_op.operand); + const payload_ty = f.typeOf(ty_op.operand); const payload = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5621,7 +5633,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_ty = inst_ty.errorUnionSet(); @@ -5661,7 +5673,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); - const error_union_ty = f.air.typeOf(ty_op.operand).childType(); + const 
error_union_ty = f.typeOf(ty_op.operand).childType(); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); @@ -5684,7 +5696,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { // Then return the payload pointer (only if it is used) if (f.liveness.isUnused(inst)) return .none; - const local = try f.allocLocal(inst, f.air.typeOfIndex(inst)); + const local = try f.allocLocal(inst, f.typeOfIndex(inst)); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = &("); try f.writeCValueDeref(writer, operand); @@ -5711,7 +5723,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(); const payload = try f.resolveInst(ty_op.operand); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); @@ -5747,7 +5759,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const const writer = f.object.writer(); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); const local = try f.allocLocal(inst, Type.bool); const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(); @@ -5780,10 +5792,10 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const array_ty = f.air.typeOf(ty_op.operand).childType(); + const array_ty = f.typeOf(ty_op.operand).childType(); try f.writeCValueMember(writer, local, .{ .identifier = "ptr" }); try writer.writeAll(" = "); @@ -5812,10 +5824,10 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const target = f.object.dg.module.getTarget(); const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat()) if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend" @@ -5855,9 +5867,9 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const operand = try f.resolveInst(un_op); - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); try reap(f, inst, &.{un_op}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); @@ -5885,9 +5897,9 @@ fn airUnBuiltinCall( const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const 
scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); @@ -5927,7 +5939,7 @@ fn airBinBuiltinCall( const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); + const operand_ty = f.typeOf(bin_op.lhs); const operand_cty = try f.typeToCType(operand_ty, .complete); const is_big = operand_cty.tag() == .array; @@ -5935,7 +5947,7 @@ fn airBinBuiltinCall( const rhs = try f.resolveInst(bin_op.rhs); if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const scalar_ty = operand_ty.scalarType(mod); @@ -5984,9 +5996,9 @@ fn airCmpBuiltinCall( const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); - const operand_ty = f.air.typeOf(data.lhs); + const operand_ty = f.typeOf(data.lhs); const scalar_ty = operand_ty.scalarType(mod); const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); @@ -6032,11 +6044,11 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const ptr = try f.resolveInst(extra.ptr); const expected_value = try f.resolveInst(extra.expected_value); const new_value = try f.resolveInst(extra.new_value); - const ptr_ty = f.air.typeOf(extra.ptr); + const ptr_ty = f.typeOf(extra.ptr); const ty = ptr_ty.childType(); const writer = f.object.writer(); @@ -6137,8 +6149,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const ptr_ty = f.air.typeOf(pl_op.operand); + const inst_ty = f.typeOfIndex(inst); + const ptr_ty = f.typeOf(pl_op.operand); const ty = ptr_ty.childType(); const ptr = try f.resolveInst(pl_op.operand); const operand = try f.resolveInst(extra.operand); @@ -6193,7 +6205,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const atomic_load = f.air.instructions.items(.data)[inst].atomic_load; const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); - const ptr_ty = f.air.typeOf(atomic_load.ptr); + const ptr_ty = f.typeOf(atomic_load.ptr); const ty = ptr_ty.childType(); const repr_ty = if (ty.isRuntimeFloat()) @@ -6201,7 +6213,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { else ty; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6227,7 +6239,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = f.air.typeOf(bin_op.lhs); + const ptr_ty = f.typeOf(bin_op.lhs); const ty = ptr_ty.childType(); const ptr = try f.resolveInst(bin_op.lhs); const element = try f.resolveInst(bin_op.rhs); @@ 
-6270,10 +6282,10 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const dest_ty = f.air.typeOf(bin_op.lhs); + const dest_ty = f.typeOf(bin_op.lhs); const dest_slice = try f.resolveInst(bin_op.lhs); const value = try f.resolveInst(bin_op.rhs); - const elem_ty = f.air.typeOf(bin_op.rhs); + const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(mod); const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; const writer = f.object.writer(); @@ -6393,8 +6405,8 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { const bin_op = f.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try f.resolveInst(bin_op.lhs); const src_ptr = try f.resolveInst(bin_op.rhs); - const dest_ty = f.air.typeOf(bin_op.lhs); - const src_ty = f.air.typeOf(bin_op.rhs); + const dest_ty = f.typeOf(bin_op.lhs); + const src_ty = f.typeOf(bin_op.rhs); const writer = f.object.writer(); try writer.writeAll("memcpy("); @@ -6434,7 +6446,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const union_ty = f.air.typeOf(bin_op.lhs).childType(); + const union_ty = f.typeOf(bin_op.lhs).childType(); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety().?; @@ -6455,11 +6467,11 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const union_ty = f.air.typeOf(ty_op.operand); + const union_ty = f.typeOf(ty_op.operand); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); const a = try Assignment.start(f, writer, inst_ty); @@ -6473,8 +6485,8 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; - const inst_ty = f.air.typeOfIndex(inst); - const enum_ty = f.air.typeOf(un_op); + const inst_ty = f.typeOfIndex(inst); + const enum_ty = f.typeOf(un_op); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -6494,7 +6506,7 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const local = try f.allocLocal(inst, inst_ty); @@ -6513,7 +6525,7 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); @@ -6539,7 +6551,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { const rhs = try f.resolveInst(extra.rhs); try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = 
f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6570,7 +6582,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const lhs = try f.resolveInst(extra.a); const rhs = try f.resolveInst(extra.b); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6607,10 +6619,10 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { const reduce = f.air.instructions.items(.data)[inst].reduce; const target = mod.getTarget(); - const scalar_ty = f.air.typeOfIndex(inst); + const scalar_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(reduce.operand); try reap(f, inst, &.{reduce.operand}); - const operand_ty = f.air.typeOf(reduce.operand); + const operand_ty = f.typeOf(reduce.operand); const writer = f.object.writer(); const use_operator = scalar_ty.bitSize(mod) <= 64; @@ -6762,7 +6774,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const len = @intCast(usize, inst_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]); const gpa = f.object.dg.gpa; @@ -6892,10 +6904,10 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; - const union_ty = f.air.typeOfIndex(inst); + const union_ty = f.typeOfIndex(inst); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_name = union_obj.fields.keys()[extra.field_index]; - const payload_ty = f.air.typeOf(extra.init); + const payload_ty = f.typeOf(extra.init); const payload = try f.resolveInst(extra.init); try reap(f, inst, &.{extra.init}); @@ -6965,7 +6977,7 @@ fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); @@ -6979,7 +6991,7 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); const local = try f.allocLocal(inst, inst_ty); @@ -6999,7 +7011,7 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); @@ -7025,7 +7037,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); @@ -7054,7 +7066,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa const 
rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); @@ -7088,7 +7100,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { const addend = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const writer = f.object.writer(); @@ -7114,7 +7126,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { } fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const fn_cty = try f.typeToCType(f.object.dg.decl.?.ty, .complete); const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len; @@ -7133,7 +7145,7 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { fn airCVaArg(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const va_list = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -7164,7 +7176,7 @@ fn airCVaEnd(f: *Function, inst: Air.Inst.Index) !CValue { fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const va_list = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c3d3da0d322e..2e42e8e3fc20 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4497,7 +4497,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const llvm_val = try self.resolveValue(.{ - .ty = self.air.typeOf(inst), + .ty = self.typeOf(inst), .val = self.air.value(inst, mod).?, }); gop.value_ptr.* = llvm_val; @@ -4528,11 +4528,12 @@ pub const FuncGen = struct { } fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void { + const mod = self.dg.module; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) 
|inst, i| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const opt_value: ?*llvm.Value = switch (air_tags[inst]) { // zig fmt: off @@ -4751,6 +4752,8 @@ pub const FuncGen = struct { .constant => unreachable, .const_ty => unreachable, + .interned => unreachable, + .unreach => self.airUnreach(inst), .dbg_stmt => self.airDbgStmt(inst), .dbg_inline_begin => try self.airDbgInlineBegin(inst), @@ -4781,7 +4784,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const callee_ty = self.air.typeOf(pl_op.operand); + const callee_ty = self.typeOf(pl_op.operand); const mod = self.dg.module; const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, @@ -4815,7 +4818,7 @@ pub const FuncGen = struct { .no_bits => continue, .byval => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const llvm_param_ty = try self.dg.lowerType(param_ty); if (isByRef(param_ty, mod)) { @@ -4829,7 +4832,7 @@ pub const FuncGen = struct { }, .byref => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); if (isByRef(param_ty, mod)) { try llvm_args.append(llvm_arg); @@ -4844,7 +4847,7 @@ pub const FuncGen = struct { }, .byref_mut => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const alignment = param_ty.abiAlignment(mod); @@ -4865,7 +4868,7 @@ pub const FuncGen = struct { }, .abi_sized_int => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_arg = try self.resolveInst(arg); const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); const int_llvm_ty = self.context.intType(abi_size * 8); @@ -4901,7 +4904,7 @@ pub const FuncGen = struct { }, .multiple_llvm_types => { const arg = args[it.zig_index - 1]; - const param_ty = self.air.typeOf(arg); + const param_ty = self.typeOf(arg); const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len]; const llvm_arg = try self.resolveInst(arg); const is_by_ref = isByRef(param_ty, mod); @@ -4930,7 +4933,7 @@ pub const FuncGen = struct { }, .float_array => |count| { const arg = args[it.zig_index - 1]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); if (!isByRef(arg_ty, mod)) { const p = self.buildAlloca(llvm_arg.typeOf(), null); @@ -4950,7 +4953,7 @@ pub const FuncGen = struct { .i32_array, .i64_array => |arr_len| { const elem_size: u8 = if (lowering == .i32_array) 32 else 64; const arg = args[it.zig_index - 1]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); var llvm_arg = try self.resolveInst(arg); if (!isByRef(arg_ty, mod)) { const p = self.buildAlloca(llvm_arg.typeOf(), null); @@ -5094,7 +5097,7 @@ pub const FuncGen = struct { fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ret_ty = self.air.typeOf(un_op); + const ret_ty = self.typeOf(un_op); if (self.ret_ptr) 
|ret_ptr| { const operand = try self.resolveInst(un_op); var ptr_ty_payload: Type.Payload.ElemType = .{ @@ -5150,7 +5153,7 @@ pub const FuncGen = struct { fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(); const fn_info = self.dg.decl.ty.fnInfo(); const mod = self.dg.module; @@ -5236,7 +5239,7 @@ pub const FuncGen = struct { fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; - const va_list_ty = self.air.typeOfIndex(inst); + const va_list_ty = self.typeOfIndex(inst); const llvm_va_list_ty = try self.dg.lowerType(va_list_ty); const result_alignment = va_list_ty.abiAlignment(mod); @@ -5266,7 +5269,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const operand_ty = self.air.typeOf(bin_op.lhs); + const operand_ty = self.typeOf(bin_op.lhs); return self.cmp(lhs, rhs, operand_ty, op); } @@ -5279,7 +5282,7 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const vec_ty = self.air.typeOf(extra.lhs); + const vec_ty = self.typeOf(extra.lhs); const cmp_op = extra.compareOperator(); return self.cmp(lhs, rhs, vec_ty, cmp_op); @@ -5396,12 +5399,12 @@ pub const FuncGen = struct { } fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const parent_bb = self.context.createBasicBlock("Block"); - const mod = self.dg.module; if (inst_ty.isNoReturn()) { try self.genBody(body); @@ -5453,7 +5456,7 @@ pub const FuncGen = struct { const block = self.blocks.get(branch.block_inst).?; // Add the values to the lists only if the break provides a value. 
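All of these backend hunks make the same mechanical change: self.air.typeOf and self.air.typeOfIndex become self.typeOf and self.typeOfIndex (f.typeOf in the C backend above), routing every type query through the codegen context, which knows the Module and therefore the InternPool. A minimal sketch of what such a wrapper presumably looks like follows; the (inst, ip) signature is an assumption by analogy with the Air.mustLower(inst, ip.*) call in genBody above, not something this diff shows:

    // Sketch only, not taken from this patch: FuncGen-level wrappers that
    // thread the InternPool into Air's type queries. Assumes that
    // Air.typeOf/Air.typeOfIndex now take the pool by value, matching the
    // Air.mustLower(inst, ip.*) call seen earlier in genBody.
    fn typeOf(self: *FuncGen, inst: Air.Inst.Ref) Type {
        const mod = self.dg.module;
        return self.air.typeOf(inst, mod.intern_pool);
    }

    fn typeOfIndex(self: *FuncGen, inst: Air.Inst.Index) Type {
        const mod = self.dg.module;
        return self.air.typeOfIndex(inst, mod.intern_pool);
    }

Centralizing the Module lookup this way keeps the hundreds of call sites in this diff to a one-token rename while the underlying Air API grows its InternPool parameter.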
- const operand_ty = self.air.typeOf(branch.operand); + const operand_ty = self.typeOf(branch.operand); const mod = self.dg.module; if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) { const val = try self.resolveInst(branch.operand); @@ -5497,8 +5500,8 @@ pub const FuncGen = struct { const err_union = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const err_union_ty = self.typeOf(pl_op.operand); + const payload_ty = self.typeOfIndex(inst); const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false; const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused); @@ -5509,7 +5512,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(); const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused); } @@ -5650,7 +5653,7 @@ pub const FuncGen = struct { // would have been emitted already. Also the main loop in genBody can // be while(true) instead of for(body), which will eliminate 1 branch on // a hot path. - if (body.len == 0 or !self.air.typeOfIndex(body[body.len - 1]).isNoReturn()) { + if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn()) { _ = self.builder.buildBr(loop_block); } return null; @@ -5659,11 +5662,11 @@ pub const FuncGen = struct { fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const array_ty = operand_ty.childType(); const llvm_usize = try self.dg.lowerType(Type.usize); const len = llvm_usize.constInt(array_ty.arrayLen(), .False); - const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); + const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, ""); @@ -5683,10 +5686,10 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand_scalar_ty = operand_ty.scalarType(mod); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); const target = mod.getTarget(); @@ -5743,10 +5746,10 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand_scalar_ty = operand_ty.scalarType(mod); - const dest_ty = 
self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); @@ -5832,7 +5835,7 @@ pub const FuncGen = struct { fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); - const slice_ptr_ty = self.air.typeOf(ty_op.operand); + const slice_ptr_ty = self.typeOf(ty_op.operand); const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType()); return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); @@ -5842,7 +5845,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(); @@ -5863,7 +5866,7 @@ pub const FuncGen = struct { fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); @@ -5878,7 +5881,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const array_ty = self.air.typeOf(bin_op.lhs); + const array_ty = self.typeOf(bin_op.lhs); const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try self.dg.lowerType(array_ty); @@ -5920,7 +5923,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = try self.resolveInst(bin_op.lhs); @@ -5948,7 +5951,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); @@ -5973,7 +5976,7 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; const struct_ptr = try self.resolveInst(struct_field.struct_operand); - const struct_ptr_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ptr_ty = self.typeOf(struct_field.struct_operand); return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, struct_field.field_index); } @@ -5984,7 +5987,7 @@ pub const FuncGen = struct { ) !?*llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolveInst(ty_op.operand); - const struct_ptr_ty = self.air.typeOf(ty_op.operand); + const struct_ptr_ty = self.typeOf(ty_op.operand); return self.fieldPtr(inst, struct_ptr, 
struct_ptr_ty, field_index); } @@ -5993,7 +5996,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ty = self.typeOf(struct_field.struct_operand); const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); @@ -6234,7 +6237,7 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); - const ptr_ty = self.air.typeOf(pl_op.operand); + const ptr_ty = self.typeOf(pl_op.operand); const di_local_var = dib.createAutoVariable( self.di_scope.?, @@ -6259,7 +6262,7 @@ pub const FuncGen = struct { const dib = self.dg.object.di_builder orelse return null; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); - const operand_ty = self.air.typeOf(pl_op.operand); + const operand_ty = self.typeOf(pl_op.operand); const name = self.air.nullTerminatedString(pl_op.payload); if (needDbgVarWorkaround(self.dg)) { @@ -6361,7 +6364,7 @@ pub const FuncGen = struct { llvm_ret_indirect[i] = (output != .none) and constraintAllowsMemory(constraint); if (output != .none) { const output_inst = try self.resolveInst(output); - const output_ty = self.air.typeOf(output); + const output_ty = self.typeOf(output); assert(output_ty.zigTypeTag(mod) == .Pointer); const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType()); @@ -6379,7 +6382,7 @@ pub const FuncGen = struct { llvm_ret_i += 1; } } else { - const ret_ty = self.air.typeOfIndex(inst); + const ret_ty = self.typeOfIndex(inst); llvm_ret_types[llvm_ret_i] = try self.dg.lowerType(ret_ty); llvm_ret_i += 1; } @@ -6414,7 +6417,7 @@ pub const FuncGen = struct { extra_i += (constraint.len + name.len + (2 + 3)) / 4; const arg_llvm_value = try self.resolveInst(input); - const arg_ty = self.air.typeOf(input); + const arg_ty = self.typeOf(input); var llvm_elem_ty: ?*llvm.Type = null; if (isByRef(arg_ty, mod)) { llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty); @@ -6636,7 +6639,7 @@ pub const FuncGen = struct { if (output != .none) { const output_ptr = try self.resolveInst(output); - const output_ptr_ty = self.air.typeOf(output); + const output_ptr_ty = self.typeOf(output); const store_inst = self.builder.buildStore(output_value, output_ptr); store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod)); @@ -6657,7 +6660,7 @@ pub const FuncGen = struct { ) !?*llvm.Value { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; const optional_llvm_ty = try self.dg.lowerType(optional_ty); var buf: Type.Payload.ElemType = undefined; @@ -6706,7 +6709,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; const payload_ty = 
err_union_ty.errorUnionPayload(); const err_set_ty = try self.dg.lowerType(Type.anyerror); @@ -6746,7 +6749,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand).childType(); + const optional_ty = self.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -6768,7 +6771,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand).childType(); + const optional_ty = self.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); const non_null_bit = self.context.intType(8).constInt(1, .False); @@ -6801,8 +6804,8 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOf(ty_op.operand); + const payload_ty = self.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; if (optional_ty.optionalReprIsPayload(mod)) { @@ -6824,9 +6827,9 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -6859,7 +6862,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); @@ -6893,7 +6896,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const err_union_ty = self.air.typeOf(ty_op.operand).childType(); + const err_union_ty = self.typeOf(ty_op.operand).childType(); const payload_ty = err_union_ty.errorUnionPayload(); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); @@ -6946,12 +6949,12 @@ pub const FuncGen = struct { fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); const non_null_bit = self.context.intType(8).constInt(1, .False); comptime assert(optional_layout_version == 3); if 
(!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); if (optional_ty.optionalReprIsPayload(mod)) { return operand; } @@ -6976,9 +6979,9 @@ pub const FuncGen = struct { fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_un_ty = self.air.typeOfIndex(inst); + const err_un_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } @@ -7009,7 +7012,7 @@ pub const FuncGen = struct { fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_un_ty = self.air.typeOfIndex(inst); + const err_un_ty = self.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -7069,7 +7072,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Bin, data.payload).data; const vector_ptr = try self.resolveInst(data.vector_ptr); - const vector_ptr_ty = self.air.typeOf(data.vector_ptr); + const vector_ptr_ty = self.typeOf(data.vector_ptr); const index = try self.resolveInst(extra.lhs); const operand = try self.resolveInst(extra.rhs); @@ -7090,7 +7093,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod); + const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, ""); @@ -7102,7 +7105,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod); + const scalar_ty = self.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, ""); @@ -7114,7 +7117,7 @@ pub const FuncGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const llvm_slice_ty = try self.dg.lowerType(inst_ty); // In case of slicing a global, the result type looks something like `{ i8*, i64 }` @@ -7130,7 +7133,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs }); @@ -7153,7 +7156,7 @@ pub const 
FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
@@ -7169,7 +7172,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs });
@@ -7192,7 +7195,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
@@ -7207,7 +7210,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs });
@@ -7230,7 +7233,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
@@ -7244,7 +7247,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
     }
@@ -7256,7 +7259,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isRuntimeFloat()) {
@@ -7274,7 +7277,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isRuntimeFloat()) {
@@ -7314,7 +7317,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
@@ -7329,7 +7332,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
@@ -7344,7 +7347,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const inst_llvm_ty = try self.dg.lowerType(inst_ty);
         const scalar_ty = inst_ty.scalarType(mod);
@@ -7386,7 +7389,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr = try self.resolveInst(bin_op.lhs);
         const offset = try self.resolveInst(bin_op.rhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
         switch (ptr_ty.ptrSize()) {
             .One => {
@@ -7412,7 +7415,7 @@ pub const FuncGen = struct {
         const ptr = try self.resolveInst(bin_op.lhs);
         const offset = try self.resolveInst(bin_op.rhs);
         const negative_offset = self.builder.buildNeg(offset, "");
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
         switch (ptr_ty.ptrSize()) {
             .One => {
@@ -7447,9 +7450,9 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
-        const lhs_ty = self.air.typeOf(extra.lhs);
+        const lhs_ty = self.typeOf(extra.lhs);
         const scalar_ty = lhs_ty.scalarType(mod);
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
@@ -7735,7 +7738,7 @@ pub const FuncGen = struct {
         const mulend2 = try self.resolveInst(extra.rhs);
         const addend = try self.resolveInst(pl_op.operand);
-        const ty = self.air.typeOfIndex(inst);
+        const ty = self.typeOfIndex(inst);
         return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend });
     }
@@ -7747,12 +7750,12 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
         const lhs_scalar_ty = lhs_ty.scalarType(mod);
         const rhs_scalar_ty = rhs_ty.scalarType(mod);
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const llvm_dest_ty = try self.dg.lowerType(dest_ty);
         const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
@@ -7821,8 +7824,8 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
         const lhs_scalar_ty = lhs_ty.scalarType(mod);
         const rhs_scalar_ty = rhs_ty.scalarType(mod);
@@ -7841,8 +7844,8 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_type = self.air.typeOf(bin_op.lhs);
-        const rhs_type = self.air.typeOf(bin_op.rhs);
+        const lhs_type = self.typeOf(bin_op.lhs);
+        const rhs_type = self.typeOf(bin_op.rhs);
         const lhs_scalar_ty = lhs_type.scalarType(mod);
         const rhs_scalar_ty = rhs_type.scalarType(mod);
@@ -7860,8 +7863,8 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
         const lhs_scalar_ty = lhs_ty.scalarType(mod);
         const rhs_scalar_ty = rhs_ty.scalarType(mod);
         const lhs_bits = lhs_scalar_ty.bitSize(mod);
@@ -7903,8 +7906,8 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
         const lhs_scalar_ty = lhs_ty.scalarType(mod);
         const rhs_scalar_ty = rhs_ty.scalarType(mod);
@@ -7932,11 +7935,11 @@ pub const FuncGen = struct {
     fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest_info = dest_ty.intInfo(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand_info = operand_ty.intInfo(mod);
 
         if (operand_info.bits < dest_info.bits) {
@@ -7954,7 +7957,7 @@ pub const FuncGen = struct {
     fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+        const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
         return self.builder.buildTrunc(operand, dest_llvm_ty, "");
     }
@@ -7962,8 +7965,8 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const dest_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const dest_ty = self.typeOfIndex(inst);
         const target = mod.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);
@@ -7992,8 +7995,8 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const dest_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const dest_ty = self.typeOfIndex(inst);
         const target = mod.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);
@@ -8021,16 +8024,16 @@ pub const FuncGen = struct {
     fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty);
-        const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+        const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
         return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, "");
     }
 
     fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const inst_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
         return self.bitCast(operand, operand_ty, inst_ty);
     }
@@ -8159,17 +8162,17 @@ pub const FuncGen = struct {
     }
 
     fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const arg_val = self.args[self.arg_index];
         self.arg_index += 1;
 
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         if (self.dg.object.di_builder) |dib| {
             if (needDbgVarWorkaround(self.dg)) {
                 return arg_val;
             }
 
             const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
-            const mod = self.dg.module;
             const func = self.dg.decl.getFunction().?;
             const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
             const lbrace_col = func.lbrace_column + 1;
@@ -8203,9 +8206,9 @@ pub const FuncGen = struct {
     }
 
     fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const ptr_ty = self.air.typeOfIndex(inst);
-        const pointee_type = ptr_ty.childType();
         const mod = self.dg.module;
+        const ptr_ty = self.typeOfIndex(inst);
+        const pointee_type = ptr_ty.childType();
         if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
         const pointee_llvm_ty = try self.dg.lowerType(pointee_type);
@@ -8214,9 +8217,9 @@ pub const FuncGen = struct {
     }
 
     fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const ptr_ty = self.air.typeOfIndex(inst);
-        const ret_ty = ptr_ty.childType();
         const mod = self.dg.module;
+        const ptr_ty = self.typeOfIndex(inst);
+        const ret_ty = ptr_ty.childType();
         if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
         if (self.ret_ptr) |ret_ptr| return ret_ptr;
         const ret_llvm_ty = try self.dg.lowerType(ret_ty);
@@ -8232,7 +8235,7 @@ pub const FuncGen = struct {
     fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_ptr = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const operand_ty = ptr_ty.childType();
 
         const mod = self.dg.module;
@@ -8285,7 +8288,7 @@ pub const FuncGen = struct {
         const mod = fg.dg.module;
         const inst = body_tail[0];
         const ty_op = fg.air.instructions.items(.data)[inst].ty_op;
-        const ptr_ty = fg.air.typeOf(ty_op.operand);
+        const ptr_ty = fg.typeOf(ty_op.operand);
         const ptr_info = ptr_ty.ptrInfo().data;
         const ptr = try fg.resolveInst(ty_op.operand);
@@ -8361,7 +8364,7 @@ pub const FuncGen = struct {
         const ptr = try self.resolveInst(extra.ptr);
         var expected_value = try self.resolveInst(extra.expected_value);
         var new_value = try self.resolveInst(extra.new_value);
-        const operand_ty = self.air.typeOf(extra.ptr).elemType();
+        const operand_ty = self.typeOf(extra.ptr).elemType();
         const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
         if (opt_abi_ty) |abi_ty| {
             // operand needs widening and truncating
@@ -8383,7 +8386,7 @@ pub const FuncGen = struct {
         );
         result.setWeak(llvm.Bool.fromBool(is_weak));
 
-        const optional_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOfIndex(inst);
 
         var payload = self.builder.buildExtractValue(result, 0, "");
         if (opt_abi_ty != null) {
@@ -8406,7 +8409,7 @@ pub const FuncGen = struct {
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
         const ptr = try self.resolveInst(pl_op.operand);
-        const ptr_ty = self.air.typeOf(pl_op.operand);
+        const ptr_ty = self.typeOf(pl_op.operand);
         const operand_ty = ptr_ty.elemType();
         const operand = try self.resolveInst(extra.operand);
         const is_signed_int = operand_ty.isSignedInt(mod);
@@ -8461,7 +8464,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
         const ptr = try self.resolveInst(atomic_load.ptr);
-        const ptr_ty = self.air.typeOf(atomic_load.ptr);
+        const ptr_ty = self.typeOf(atomic_load.ptr);
         const ptr_info = ptr_ty.ptrInfo().data;
         const elem_ty = ptr_info.pointee_type;
         if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
@@ -8494,7 +8497,7 @@ pub const FuncGen = struct {
     ) !?*llvm.Value {
         const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const operand_ty = ptr_ty.childType();
         if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null;
         const ptr = try self.resolveInst(bin_op.lhs);
@@ -8517,8 +8520,8 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const elem_ty = self.air.typeOf(bin_op.rhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
+        const elem_ty = self.typeOf(bin_op.rhs);
         const module = self.dg.module;
         const target = module.getTarget();
         const dest_ptr_align = ptr_ty.ptrAlignment(mod);
@@ -8641,9 +8644,9 @@ pub const FuncGen = struct {
     fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
-        const dest_ptr_ty = self.air.typeOf(bin_op.lhs);
+        const dest_ptr_ty = self.typeOf(bin_op.lhs);
         const src_slice = try self.resolveInst(bin_op.rhs);
-        const src_ptr_ty = self.air.typeOf(bin_op.rhs);
+        const src_ptr_ty = self.typeOf(bin_op.rhs);
         const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
         const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
         const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
@@ -8663,7 +8666,7 @@ pub const FuncGen = struct {
     fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const un_ty = self.air.typeOf(bin_op.lhs).childType();
+        const un_ty = self.typeOf(bin_op.lhs).childType();
         const layout = un_ty.unionGetLayout(mod);
         if (layout.tag_size == 0) return null;
         const union_ptr = try self.resolveInst(bin_op.lhs);
@@ -8684,7 +8687,7 @@ pub const FuncGen = struct {
     fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const un_ty = self.air.typeOf(ty_op.operand);
+        const un_ty = self.typeOf(ty_op.operand);
         const layout = un_ty.unionGetLayout(mod);
         if (layout.tag_size == 0) return null;
         const union_handle = try self.resolveInst(ty_op.operand);
@@ -8708,7 +8711,7 @@ pub const FuncGen = struct {
     fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
 
         return self.buildFloatOp(op, operand_ty, 1, .{operand});
     }
@@ -8718,7 +8721,7 @@ pub const FuncGen = struct {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
 
         return self.buildFloatOp(.neg, operand_ty, 1, .{operand});
     }
@@ -8726,7 +8729,7 @@ pub const FuncGen = struct {
     fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand = try self.resolveInst(ty_op.operand);
 
         const llvm_i1 = self.context.intType(1);
@@ -8735,7 +8738,7 @@ pub const FuncGen = struct {
         const params = [_]*llvm.Value{ operand, llvm_i1.constNull() };
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
 
         const bits = operand_ty.intInfo(mod).bits;
@@ -8752,7 +8755,7 @@ pub const FuncGen = struct {
     fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand = try self.resolveInst(ty_op.operand);
 
         const params = [_]*llvm.Value{operand};
@@ -8760,7 +8763,7 @@ pub const FuncGen = struct {
         const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
 
         const bits = operand_ty.intInfo(mod).bits;
@@ -8777,7 +8780,7 @@ pub const FuncGen = struct {
     fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         var bits = operand_ty.intInfo(mod).bits;
         assert(bits % 8 == 0);
@@ -8815,7 +8818,7 @@ pub const FuncGen = struct {
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
 
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
         const result_bits = result_ty.intInfo(mod).bits;
         if (bits > result_bits) {
@@ -8876,7 +8879,7 @@ pub const FuncGen = struct {
     fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const enum_ty = self.air.typeOf(un_op);
+        const enum_ty = self.typeOf(un_op);
 
         const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty);
         const params = [_]*llvm.Value{operand};
@@ -8954,7 +8957,7 @@ pub const FuncGen = struct {
     fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const enum_ty = self.air.typeOf(un_op);
+        const enum_ty = self.typeOf(un_op);
 
         const llvm_fn = try self.getEnumTagNameFunction(enum_ty);
         const params = [_]*llvm.Value{operand};
@@ -9083,7 +9086,7 @@ pub const FuncGen = struct {
     fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const slice_ty = self.air.typeOfIndex(inst);
+        const slice_ty = self.typeOfIndex(inst);
         const slice_llvm_ty = try self.dg.lowerType(slice_ty);
 
         const error_name_table_ptr = try self.getErrorNameTable();
@@ -9097,7 +9100,7 @@ pub const FuncGen = struct {
     fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const scalar = try self.resolveInst(ty_op.operand);
-        const vector_ty = self.air.typeOfIndex(inst);
+        const vector_ty = self.typeOfIndex(inst);
         const len = vector_ty.vectorLen();
         return self.builder.buildVectorSplat(len, scalar, "");
     }
@@ -9120,7 +9123,7 @@ pub const FuncGen = struct {
         const b = try self.resolveInst(extra.b);
         const mask = self.air.values[extra.mask];
         const mask_len = extra.mask_len;
-        const a_len = self.air.typeOf(extra.a).vectorLen();
+        const a_len = self.typeOf(extra.a).vectorLen();
 
         // LLVM uses integers larger than the length of the first array to
         // index into the second array. This was deemed unnecessarily fragile
@@ -9219,8 +9222,8 @@ pub const FuncGen = struct {
         const reduce = self.air.instructions.items(.data)[inst].reduce;
         const operand = try self.resolveInst(reduce.operand);
-        const operand_ty = self.air.typeOf(reduce.operand);
-        const scalar_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(reduce.operand);
+        const scalar_ty = self.typeOfIndex(inst);
 
         switch (reduce.operation) {
             .And => return self.builder.buildAndReduce(operand),
@@ -9300,12 +9303,12 @@ pub const FuncGen = struct {
     }
 
     fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const len = @intCast(usize, result_ty.arrayLen());
         const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
         const llvm_result_ty = try self.dg.lowerType(result_ty);
-        const mod = self.dg.module;
 
         switch (result_ty.zigTypeTag(mod)) {
             .Vector => {
@@ -9370,7 +9373,7 @@ pub const FuncGen = struct {
                     const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
                     var field_ptr_payload: Type.Payload.Pointer = .{
                         .data = .{
-                            .pointee_type = self.air.typeOf(elem),
+                            .pointee_type = self.typeOf(elem),
                             .@"align" = result_ty.structFieldAlign(i, mod),
                             .@"addrspace" = .generic,
                         },
@@ -9440,7 +9443,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
-        const union_ty = self.air.typeOfIndex(inst);
+        const union_ty = self.typeOfIndex(inst);
         const union_llvm_ty = try self.dg.lowerType(union_ty);
         const layout = union_ty.unionGetLayout(mod);
         const union_obj = union_ty.cast(Type.Payload.Union).?.data;
@@ -9643,7 +9646,7 @@ pub const FuncGen = struct {
     fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
 
         const llvm_dest_ty = try self.dg.lowerType(inst_ty);
@@ -9830,7 +9833,7 @@ pub const FuncGen = struct {
         switch (struct_ty.zigTypeTag(mod)) {
             .Struct => switch (struct_ty.containerLayout()) {
                 .Packed => {
-                    const result_ty = self.air.typeOfIndex(inst);
+                    const result_ty = self.typeOfIndex(inst);
                     const result_ty_info = result_ty.ptrInfo().data;
 
                     if (result_ty_info.host_size != 0) {
@@ -10172,6 +10175,16 @@ pub const FuncGen = struct {
         );
         return call;
     }
+
+    fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
+        const mod = fg.dg.module;
+        return fg.air.typeOf(inst, mod.intern_pool);
+    }
+
+    fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
+        const mod = fg.dg.module;
+        return fg.air.typeOfIndex(inst, mod.intern_pool);
+    }
 };
 
 fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
@@ -10833,7 +10846,7 @@ const ParamTypeIterator = struct {
                 if (it.zig_index >= args.len) {
                     return null;
                 } else {
-                    return nextInner(it, fg.air.typeOf(args[it.zig_index]));
+                    return nextInner(it, fg.typeOf(args[it.zig_index]));
                 }
             } else {
                 return nextInner(it, it.fn_info.param_types[it.zig_index]);
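Taken together, the llvm.zig hunks above are one mechanical rewrite: every `self.air.typeOf(...)` and `self.air.typeOfIndex(...)` call becomes a call to a backend-local wrapper, because `Air.typeOf` now needs the `InternPool` to decode operand references. A minimal sketch of the underlying reason, not the actual compiler source; the `static_len` split is borrowed from `Sema.resolveInst` in patch 005 below, and the pool lookup helper is an assumption:

    // Sketch only: low Ref values name statically interned types/values
    // whose type lives in the pool; the remaining Refs index AIR
    // instructions, which may themselves carry interned type indices.
    pub fn typeOf(air: Air, ref: Air.Inst.Ref, ip: InternPool) Type {
        const i = @enumToInt(ref);
        if (i < InternPool.static_len) {
            // Hypothetical helper: fetch the interned item's type.
            return ip.typeOfInterned(@intToEnum(InternPool.Index, i));
        }
        return air.typeOfIndex(i - InternPool.static_len, ip);
    }

Without the pool argument, a `Ref` such as `u32_type` could no longer be resolved to a `Type` from the AIR arrays alone, which is what forces this sweep over every backend.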
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index b8c8466427df..41abbde1a0f1 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -233,7 +233,7 @@ pub const DeclGen = struct {
     fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef {
         const mod = self.module;
         if (self.air.value(inst, mod)) |val| {
-            const ty = self.air.typeOf(inst);
+            const ty = self.typeOf(inst);
             if (ty.zigTypeTag(mod) == .Fn) {
                 const fn_decl_index = switch (val.tag()) {
                     .extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
@@ -1720,10 +1720,11 @@ pub const DeclGen = struct {
     }
 
     fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const mod = self.module;
+        const ip = &mod.intern_pool;
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
             return;
-        }
 
         const air_tags = self.air.instructions.items(.tag);
         const maybe_result_id: ?IdRef = switch (air_tags[inst]) {
@@ -1847,7 +1848,7 @@ pub const DeclGen = struct {
         const lhs_id = try self.resolve(bin_op.lhs);
         const rhs_id = try self.resolve(bin_op.rhs);
         const result_id = self.spv.allocId();
-        const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst));
+        const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst));
         try self.func.body.emit(self.spv.gpa, opcode, .{
             .id_result_type = result_type_id,
             .id_result = result_id,
@@ -1862,7 +1863,7 @@ pub const DeclGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs_id = try self.resolve(bin_op.lhs);
         const rhs_id = try self.resolve(bin_op.rhs);
-        const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst));
+        const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst));
 
         // the shift and the base must be the same type in SPIR-V, but in Zig the shift is a smaller int.
         const shift_id = self.spv.allocId();
@@ -1907,15 +1908,15 @@ pub const DeclGen = struct {
         if (self.liveness.isUnused(inst)) return null;
         // LHS and RHS are guaranteed to have the same type, and AIR guarantees
         // the result to be the same as the LHS and RHS, which matches SPIR-V.
-        const ty = self.air.typeOfIndex(inst);
+        const ty = self.typeOfIndex(inst);
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         var lhs_id = try self.resolve(bin_op.lhs);
         var rhs_id = try self.resolve(bin_op.rhs);
 
         const result_ty_ref = try self.resolveType(ty, .direct);
-        assert(self.air.typeOf(bin_op.lhs).eql(ty, self.module));
-        assert(self.air.typeOf(bin_op.rhs).eql(ty, self.module));
+        assert(self.typeOf(bin_op.lhs).eql(ty, self.module));
+        assert(self.typeOf(bin_op.rhs).eql(ty, self.module));
 
         // Binary operations are generally applicable to both scalar and vector operations
         // in SPIR-V, but int and float versions of operations require different opcodes.
@@ -1971,8 +1972,8 @@ pub const DeclGen = struct {
         const lhs = try self.resolve(extra.lhs);
         const rhs = try self.resolve(extra.rhs);
 
-        const operand_ty = self.air.typeOf(extra.lhs);
-        const result_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(extra.lhs);
+        const result_ty = self.typeOfIndex(inst);
 
         const info = try self.arithmeticTypeInfo(operand_ty);
         switch (info.class) {
@@ -2064,14 +2065,14 @@ pub const DeclGen = struct {
     fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         const mod = self.module;
         if (self.liveness.isUnused(inst)) return null;
-        const ty = self.air.typeOfIndex(inst);
+        const ty = self.typeOfIndex(inst);
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
         const a = try self.resolve(extra.a);
         const b = try self.resolve(extra.b);
         const mask = self.air.values[extra.mask];
         const mask_len = extra.mask_len;
-        const a_len = self.air.typeOf(extra.a).vectorLen();
+        const a_len = self.typeOf(extra.a).vectorLen();
 
         const result_id = self.spv.allocId();
         const result_type_id = try self.resolveTypeId(ty);
@@ -2162,8 +2163,8 @@ pub const DeclGen = struct {
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr_id = try self.resolve(bin_op.lhs);
         const offset_id = try self.resolve(bin_op.rhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const result_ty = self.air.typeOfIndex(inst);
+        const ptr_ty = self.typeOf(bin_op.lhs);
+        const result_ty = self.typeOfIndex(inst);
         return try self.ptrAdd(result_ty, ptr_ty, ptr_id, offset_id);
     }
@@ -2173,11 +2174,11 @@ pub const DeclGen = struct {
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr_id = try self.resolve(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const offset_id = try self.resolve(bin_op.rhs);
-        const offset_ty = self.air.typeOf(bin_op.rhs);
+        const offset_ty = self.typeOf(bin_op.rhs);
         const offset_ty_ref = try self.resolveType(offset_ty, .direct);
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
 
         const negative_offset_id = self.spv.allocId();
         try self.func.body.emit(self.spv.gpa, .OpSNegate, .{
@@ -2298,8 +2299,8 @@ pub const DeclGen = struct {
         const lhs_id = try self.resolve(bin_op.lhs);
         const rhs_id = try self.resolve(bin_op.rhs);
         const bool_ty_id = try self.resolveTypeId(Type.bool);
-        const ty = self.air.typeOf(bin_op.lhs);
-        assert(ty.eql(self.air.typeOf(bin_op.rhs), self.module));
+        const ty = self.typeOf(bin_op.lhs);
+        assert(ty.eql(self.typeOf(bin_op.rhs), self.module));
 
         return try self.cmp(op, bool_ty_id, ty, lhs_id, rhs_id);
     }
@@ -2337,8 +2338,8 @@ pub const DeclGen = struct {
         if (self.liveness.isUnused(inst)) return null;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const result_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const result_ty = self.typeOfIndex(inst);
         return try self.bitCast(result_ty, operand_ty, operand_id);
     }
@@ -2347,7 +2348,7 @@ pub const DeclGen = struct {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
 
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest_ty_id = try self.resolveTypeId(dest_ty);
 
         const mod = self.module;
@@ -2391,10 +2392,10 @@ pub const DeclGen = struct {
         if (self.liveness.isUnused(inst)) return null;
 
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand_id = try self.resolve(ty_op.operand);
         const operand_info = try self.arithmeticTypeInfo(operand_ty);
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest_ty_id = try self.resolveTypeId(dest_ty);
 
         const result_id = self.spv.allocId();
@@ -2418,7 +2419,7 @@ pub const DeclGen = struct {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
 
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest_info = try self.arithmeticTypeInfo(dest_ty);
         const dest_ty_id = try self.resolveTypeId(dest_ty);
@@ -2455,20 +2456,20 @@ pub const DeclGen = struct {
     fn airSliceField(self: *DeclGen, inst: Air.Inst.Index, field: u32) !?IdRef {
         if (self.liveness.isUnused(inst)) return null;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const field_ty = self.air.typeOfIndex(inst);
+        const field_ty = self.typeOfIndex(inst);
         const operand_id = try self.resolve(ty_op.operand);
         return try self.extractField(field_ty, operand_id, field);
     }
 
     fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const slice_ty = self.air.typeOf(bin_op.lhs);
+        const slice_ty = self.typeOf(bin_op.lhs);
         if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
 
         const slice_id = try self.resolve(bin_op.lhs);
         const index_id = try self.resolve(bin_op.rhs);
 
-        const ptr_ty = self.air.typeOfIndex(inst);
+        const ptr_ty = self.typeOfIndex(inst);
         const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
 
         const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
@@ -2477,7 +2478,7 @@ pub const DeclGen = struct {
     fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const slice_ty = self.air.typeOf(bin_op.lhs);
+        const slice_ty = self.typeOf(bin_op.lhs);
         if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
 
         const slice_id = try self.resolve(bin_op.lhs);
@@ -2514,7 +2515,7 @@ pub const DeclGen = struct {
         const mod = self.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const elem_ty = ptr_ty.childType(); // TODO: Make this return a null ptr or something
         if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
@@ -2526,7 +2527,7 @@ pub const DeclGen = struct {
     fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const ptr_id = try self.resolve(bin_op.lhs);
         const index_id = try self.resolve(bin_op.rhs);
@@ -2544,7 +2545,7 @@ pub const DeclGen = struct {
     fn airGetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const un_ty = self.air.typeOf(ty_op.operand);
+        const un_ty = self.typeOf(ty_op.operand);
 
         const mod = self.module;
         const layout = un_ty.unionGetLayout(mod);
@@ -2565,7 +2566,7 @@ pub const DeclGen = struct {
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
 
-        const struct_ty = self.air.typeOf(struct_field.struct_operand);
+        const struct_ty = self.typeOf(struct_field.struct_operand);
         const object_id = try self.resolve(struct_field.struct_operand);
         const field_index = struct_field.field_index;
         const field_ty = struct_ty.structFieldType(field_index);
@@ -2604,8 +2605,8 @@ pub const DeclGen = struct {
         if (self.liveness.isUnused(inst)) return null;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const struct_ptr = try self.resolve(ty_op.operand);
-        const struct_ptr_ty = self.air.typeOf(ty_op.operand);
-        const result_ptr_ty = self.air.typeOfIndex(inst);
+        const struct_ptr_ty = self.typeOf(ty_op.operand);
+        const result_ptr_ty = self.typeOfIndex(inst);
         return try self.structFieldPtr(result_ptr_ty, struct_ptr_ty, struct_ptr, field_index);
     }
@@ -2661,7 +2662,7 @@ pub const DeclGen = struct {
     fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         if (self.liveness.isUnused(inst)) return null;
-        const ptr_ty = self.air.typeOfIndex(inst);
+        const ptr_ty = self.typeOfIndex(inst);
         assert(ptr_ty.ptrAddressSpace() == .generic);
         const child_ty = ptr_ty.childType();
         const child_ty_ref = try self.resolveType(child_ty, .indirect);
@@ -2694,7 +2695,7 @@ pub const DeclGen = struct {
             incoming_blocks.deinit(self.gpa);
         }
 
-        const ty = self.air.typeOfIndex(inst);
+        const ty = self.typeOfIndex(inst);
         const inst_datas = self.air.instructions.items(.data);
         const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
@@ -2727,7 +2728,7 @@ pub const DeclGen = struct {
     fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
         const br = self.air.instructions.items(.data)[inst].br;
         const block = self.blocks.get(br.block_inst).?;
-        const operand_ty = self.air.typeOf(br.operand);
+        const operand_ty = self.typeOf(br.operand);
 
         const mod = self.module;
         if (operand_ty.hasRuntimeBits(mod)) {
@@ -2777,7 +2778,7 @@ pub const DeclGen = struct {
     fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const ptr_ty = self.air.typeOf(ty_op.operand);
+        const ptr_ty = self.typeOf(ty_op.operand);
         const operand = try self.resolve(ty_op.operand);
         if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
@@ -2787,7 +2788,7 @@ pub const DeclGen = struct {
     fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void {
         const mod = self.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const ptr = try self.resolve(bin_op.lhs);
         const value = try self.resolve(bin_op.rhs);
         const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
@@ -2819,7 +2820,7 @@ pub const DeclGen = struct {
     fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
         const operand = self.air.instructions.items(.data)[inst].un_op;
-        const operand_ty = self.air.typeOf(operand);
+        const operand_ty = self.typeOf(operand);
         const mod = self.module;
         if (operand_ty.hasRuntimeBits(mod)) {
             const operand_id = try self.resolve(operand);
@@ -2832,7 +2833,7 @@ pub const DeclGen = struct {
     fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void {
         const mod = self.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const ret_ty = ptr_ty.childType();
 
         if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -2853,8 +2854,8 @@ pub const DeclGen = struct {
         const extra = self.air.extraData(Air.Try, pl_op.payload);
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
 
-        const err_union_ty = self.air.typeOf(pl_op.operand);
-        const payload_ty = self.air.typeOfIndex(inst);
+        const err_union_ty = self.typeOf(pl_op.operand);
+        const payload_ty = self.typeOfIndex(inst);
 
         const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
         const bool_ty_ref = try self.resolveType(Type.bool, .direct);
@@ -2911,7 +2912,7 @@ pub const DeclGen = struct {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
 
-        const err_union_ty = self.air.typeOf(ty_op.operand);
+        const err_union_ty = self.typeOf(ty_op.operand);
         const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
 
         if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
@@ -2934,7 +2935,7 @@ pub const DeclGen = struct {
         if (self.liveness.isUnused(inst)) return null;
 
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const err_union_ty = self.air.typeOfIndex(inst);
+        const err_union_ty = self.typeOfIndex(inst);
         const payload_ty = err_union_ty.errorUnionPayload();
         const operand_id = try self.resolve(ty_op.operand);
         const eu_layout = self.errorUnionLayout(payload_ty);
@@ -2966,7 +2967,7 @@ pub const DeclGen = struct {
         const mod = self.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand_id = try self.resolve(un_op);
-        const optional_ty = self.air.typeOf(un_op);
+        const optional_ty = self.typeOf(un_op);
 
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
@@ -3030,8 +3031,8 @@ pub const DeclGen = struct {
         const mod = self.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_id = try self.resolve(ty_op.operand);
-        const optional_ty = self.air.typeOf(ty_op.operand);
-        const payload_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOf(ty_op.operand);
+        const payload_ty = self.typeOfIndex(inst);
 
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
@@ -3047,14 +3048,14 @@ pub const DeclGen = struct {
         const mod = self.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-        const payload_ty = self.air.typeOf(ty_op.operand);
+        const payload_ty = self.typeOf(ty_op.operand);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return try self.constBool(true, .direct);
         }
 
         const operand_id = try self.resolve(ty_op.operand);
-        const optional_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOfIndex(inst);
         if (optional_ty.optionalReprIsPayload(mod)) {
             return operand_id;
         }
@@ -3068,7 +3069,7 @@ pub const DeclGen = struct {
         const mod = self.module;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const cond = try self.resolve(pl_op.operand);
-        const cond_ty = self.air.typeOf(pl_op.operand);
+        const cond_ty = self.typeOf(pl_op.operand);
         const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
 
         const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) {
@@ -3317,7 +3318,7 @@ pub const DeclGen = struct {
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.Call, pl_op.payload);
         const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-        const callee_ty = self.air.typeOf(pl_op.operand);
+        const callee_ty = self.typeOf(pl_op.operand);
         const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
             .Fn => callee_ty,
             .Pointer => return self.fail("cannot call function pointers", .{}),
@@ -3339,7 +3340,7 @@ pub const DeclGen = struct {
             // before starting to emit OpFunctionCall instructions. Hence the
             // temporary params buffer.
             const arg_id = try self.resolve(arg);
-            const arg_ty = self.air.typeOf(arg);
+            const arg_ty = self.typeOf(arg);
             if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
             params[n_params] = arg_id;
@@ -3363,4 +3364,14 @@ pub const DeclGen = struct {
 
         return result_id;
     }
+
+    fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type {
+        const mod = self.module;
+        return self.air.typeOf(inst, mod.intern_pool);
+    }
+
+    fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type {
+        const mod = self.module;
+        return self.air.typeOfIndex(inst, mod.intern_pool);
+    }
 };
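The spirv backend ends with the same two wrappers the llvm backend gained above. As a design note: rather than threading `mod.intern_pool` through hundreds of call sites, each backend hides the new parameter behind one tiny helper per query, so call sites shrink from `self.air.typeOf(x)` to `self.typeOf(x)`. Reduced to its essence (a sketch; the field names follow the spirv code above):

    // One wrapper per query; the InternPool plumbing lives in exactly one
    // place per backend, which keeps later refactors of Air's API cheap.
    fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type {
        return self.air.typeOf(inst, self.module.intern_pool);
    }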
diff --git a/src/print_air.zig b/src/print_air.zig
index e8875ff01834..39a244e11ffc 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -306,6 +306,7 @@ const Writer = struct {
         .struct_field_ptr => try w.writeStructField(s, inst),
         .struct_field_val => try w.writeStructField(s, inst),
         .constant => try w.writeConstant(s, inst),
+        .interned => try w.writeInterned(s, inst),
 
         .assembly => try w.writeAssembly(s, inst),
         .dbg_stmt => try w.writeDbgStmt(s, inst),
@@ -515,7 +516,7 @@ const Writer = struct {
         const pl_op = w.air.instructions.items(.data)[inst].pl_op;
         const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
 
-        const elem_ty = w.air.typeOfIndex(inst).childType();
+        const elem_ty = w.typeOfIndex(inst).childType();
         try w.writeType(s, elem_ty);
         try s.writeAll(", ");
         try w.writeOperand(s, inst, 0, pl_op.operand);
@@ -614,6 +615,14 @@ const Writer = struct {
         try s.print(", {}", .{val.fmtValue(ty, w.module)});
     }
 
+    fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+        const mod = w.module;
+        const ip_index = w.air.instructions.items(.data)[inst].interned;
+        const ty = ip_index.toType();
+        try w.writeType(s, ty);
+        try s.print(", {}", .{ip_index.toValue().fmtValue(ty, mod)});
+    }
+
     fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
         const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
         const extra = w.air.extraData(Air.Asm, ty_pl.payload);
@@ -622,7 +631,7 @@ const Writer = struct {
         var extra_i: usize = extra.end;
         var op_index: usize = 0;
 
-        const ret_ty = w.air.typeOfIndex(inst);
+        const ret_ty = w.typeOfIndex(inst);
         try w.writeType(s, ret_ty);
 
         if (is_volatile) {
@@ -985,4 +994,9 @@ const Writer = struct {
         try s.print("%{d}", .{inst});
         if (dies) try s.writeByte('!');
     }
+
+    fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type {
+        const mod = w.module;
+        return w.air.typeOfIndex(inst, mod.intern_pool);
+    }
 };
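The new `writeInterned` handler relies on a convention used throughout this series: the payload of an `.interned` AIR instruction is an `InternPool.Index`, and the same index can be viewed either as a `Type` or as a `Value`. A hedged usage sketch; the `toType`/`toValue` calls are taken from the hunk above, and they are presumably thin wrappers that stash the index in the `ip_index` field introduced on `Type` and `Value` in patch 005 below:

    // Rendering an interned constant, following writeInterned above.
    const ip_index = air.instructions.items(.data)[inst].interned;
    const ty = ip_index.toType(); // the index viewed as a Type
    const val = ip_index.toValue(); // the same index viewed as a Value
    try s.print("{}", .{val.fmtValue(ty, mod)});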
diff --git a/src/type.zig b/src/type.zig
index 259079a26c14..94fd4c2eafe5 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2827,21 +2827,35 @@ pub const Type = struct {
         };
     }
 
-    /// TODO add enums with no fields here
     pub fn isNoReturn(ty: Type) bool {
-        switch (ty.tag()) {
-            .noreturn => return true,
-            .error_set => {
-                const err_set_obj = ty.castTag(.error_set).?.data;
-                const names = err_set_obj.names.keys();
-                return names.len == 0;
-            },
-            .error_set_merged => {
-                const name_map = ty.castTag(.error_set_merged).?.data;
-                const names = name_map.keys();
-                return names.len == 0;
-            },
+        switch (@enumToInt(ty.ip_index)) {
+            @enumToInt(InternPool.Index.first_type)...@enumToInt(InternPool.Index.noreturn_type) - 1 => return false,
+
+            @enumToInt(InternPool.Index.noreturn_type) => return true,
+
+            @enumToInt(InternPool.Index.noreturn_type) + 1...@enumToInt(InternPool.Index.last_type) => return false,
+
+            @enumToInt(InternPool.Index.first_value)...@enumToInt(InternPool.Index.last_value) => unreachable,
+            @enumToInt(InternPool.Index.generic_poison) => unreachable,
+
+            // TODO add empty error sets here
+            // TODO add enums with no fields here
             else => return false,
+
+            @enumToInt(InternPool.Index.none) => switch (ty.tag()) {
+                .noreturn => return true,
+                .error_set => {
+                    const err_set_obj = ty.castTag(.error_set).?.data;
+                    const names = err_set_obj.names.keys();
+                    return names.len == 0;
+                },
+                .error_set_merged => {
+                    const name_map = ty.castTag(.error_set_merged).?.data;
+                    const names = name_map.keys();
+                    return names.len == 0;
+                },
+                else => return false,
+            },
         }
     }
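This `isNoReturn` rewrite is the template the series uses to migrate `Type` predicates: statically interned indices are laid out contiguously, so a predicate can switch on integer ranges of `ip_index` and fall back to the legacy tag representation only when `ip_index == .none`. The idiom reduced to a toy (a sketch, not the real function; it assumes the `Type` and `InternPool` imports already present in type.zig):

    // Range switch over static InternPool indices; legacy fallback for .none.
    pub fn isNoReturnSketch(ty: Type) bool {
        switch (@enumToInt(ty.ip_index)) {
            @enumToInt(InternPool.Index.noreturn_type) => return true,
            @enumToInt(InternPool.Index.none) => return ty.tag() == .noreturn,
            else => return false,
        }
    }

Note that Zig allows an `else` prong to appear before other prongs, which is how the real diff can keep the old `else => return false,` line as shared context even though new prongs follow it.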
From 50f33734c6cec10a0132644c08ee443c2dd224e2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 2 May 2023 20:01:32 -0700
Subject: [PATCH 005/205] stage2: isGenericPoison InternPool awareness

---
 src/Module.zig |  2 +-
 src/Sema.zig   | 44 ++++++++++++++++++++++----------------------
 src/type.zig   | 24 ++++++++++++++++--------
 src/value.zig  |  8 ++++++++
 4 files changed, 47 insertions(+), 31 deletions(-)

diff --git a/src/Module.zig b/src/Module.zig
index 4187ac206b5e..a4ae107bede2 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5728,7 +5728,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
         const param_ty = if (func.comptime_args) |comptime_args| t: {
             const arg_tv = comptime_args[total_param_index];
 
-            const arg_val = if (arg_tv.val.tag() != .generic_poison)
+            const arg_val = if (!arg_tv.val.isGenericPoison())
                 arg_tv.val
             else if (arg_tv.ty.onePossibleValue(mod)) |opv|
                 opv
diff --git a/src/Sema.zig b/src/Sema.zig
index 540474c84ab3..45da0927cda7 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -300,7 +300,7 @@ pub const Block = struct {
                 const src_decl = sema.mod.declPtr(rt.block.src_decl);
                 break :blk rt.func_src.toSrcLoc(src_decl);
             };
-            if (rt.return_ty.tag() == .generic_poison) {
+            if (rt.return_ty.isGenericPoison()) {
                 return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{});
             }
             try sema.mod.errNoteNonLazy(
@@ -1730,7 +1730,7 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref {
     // The last section of indexes refers to the map of ZIR => AIR.
     const inst = sema.inst_map.get(i - InternPool.static_len).?;
     const ty = sema.typeOf(inst);
-    if (ty.tag() == .generic_poison) return error.GenericPoison;
+    if (ty.isGenericPoison()) return error.GenericPoison;
     return inst;
 }
@@ -1766,7 +1766,7 @@ pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Ins
     const air_inst = try sema.resolveInst(zir_ref);
     assert(air_inst != .var_args_param_type);
     const ty = try sema.analyzeAsType(block, src, air_inst);
-    if (ty.tag() == .generic_poison) return error.GenericPoison;
+    if (ty.isGenericPoison()) return error.GenericPoison;
     return ty;
 }
@@ -1827,7 +1827,7 @@ fn resolveValue(
     reason: []const u8,
 ) CompileError!Value {
     if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| {
-        if (val.tag() == .generic_poison) return error.GenericPoison;
+        if (val.isGenericPoison()) return error.GenericPoison;
         return val;
     }
     return sema.failWithNeededComptime(block, src, reason);
@@ -6549,8 +6549,8 @@ const GenericCallAdapter = struct {
         const other_comptime_args = other_key.comptime_args.?;
         for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| {
             const this_arg = ctx.args[i];
-            const this_is_comptime = this_arg.val.tag() != .generic_poison;
-            const other_is_comptime = other_arg.val.tag() != .generic_poison;
+            const this_is_comptime = !this_arg.val.isGenericPoison();
+            const other_is_comptime = !other_arg.val.isGenericPoison();
             const this_is_anytype = this_arg.is_anytype;
             const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i));
@@ -7189,7 +7189,7 @@ fn analyzeInlineCallArg(
     const param_body = sema.code.extra[extra.end..][0..extra.data.body_len];
     const param_ty = param_ty: {
         const raw_param_ty = raw_param_types[arg_i.*];
-        if (raw_param_ty.tag() != .generic_poison) break :param_ty raw_param_ty;
+        if (!raw_param_ty.isGenericPoison()) break :param_ty raw_param_ty;
         const param_ty_inst = try sema.resolveBody(param_block, param_body, inst);
         break :param_ty try sema.analyzeAsType(param_block, param_src, param_ty_inst);
     };
@@ -7317,7 +7317,7 @@ fn analyzeGenericCallArg(
     runtime_i: *u32,
 ) !void {
     const mod = sema.mod;
-    const is_runtime = comptime_arg.val.tag() == .generic_poison and
+    const is_runtime = comptime_arg.val.isGenericPoison() and
         comptime_arg.ty.hasRuntimeBits(mod) and
         !(try sema.typeRequiresComptime(comptime_arg.ty));
     if (is_runtime) {
@@ -8882,7 +8882,7 @@ fn funcCommon(
     const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
     const func_src = LazySrcLoc.nodeOffset(src_node_offset);
 
-    var is_generic = bare_return_type.tag() == .generic_poison or
+    var is_generic = bare_return_type.isGenericPoison() or
         alignment == null or
         address_space == null or
         section == .generic or
@@ -8965,7 +8965,7 @@ fn funcCommon(
     var ret_ty_requires_comptime = false;
     const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
         ret_ty_requires_comptime = ret_comptime;
-        break :rp bare_return_type.tag() == .generic_poison;
+        break :rp bare_return_type.isGenericPoison();
     } else |err| switch (err) {
         error.GenericPoison => rp: {
             is_generic = true;
@@ -9208,7 +9208,7 @@ fn analyzeParameter(
     const mod = sema.mod;
     const requires_comptime = try sema.typeRequiresComptime(param.ty);
     comptime_params[i] = param.is_comptime or requires_comptime;
-    const this_generic = param.ty.tag() == .generic_poison;
+    const this_generic = param.ty.isGenericPoison();
     is_generic.* = is_generic.* or this_generic;
     const target = mod.getTarget();
     if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) {
@@ -15872,7 +15872,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len);
             for (param_vals, 0..) |*param_val, i| {
                 const param_ty = info.param_types[i];
-                const is_generic = param_ty.tag() == .generic_poison;
+                const is_generic = param_ty.isGenericPoison();
                 const param_ty_val = if (is_generic)
                     Value.null
                 else
@@ -15936,7 +15936,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 });
             };
 
-            const ret_ty_opt = if (info.return_type.tag() != .generic_poison)
+            const ret_ty_opt = if (!info.return_type.isGenericPoison())
                 try Value.Tag.opt_payload.create(
                     sema.arena,
                     try Value.Tag.ty.create(sema.arena, info.return_type),
@@ -16713,7 +16713,7 @@ fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
     const operand = try sema.resolveBody(&child_block, body, inst);
     const operand_ty = sema.typeOf(operand);
-    if (operand_ty.tag() == .generic_poison) return error.GenericPoison;
+    if (operand_ty.isGenericPoison()) return error.GenericPoison;
     return sema.addType(operand_ty);
 }
@@ -17589,7 +17589,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             }
             return err;
         };
-        if (ty.tag() == .generic_poison) return error.GenericPoison;
+        if (ty.isGenericPoison()) return error.GenericPoison;
         break :blk ty;
     };
     const target = sema.mod.getTarget();
@@ -22575,7 +22575,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         extra_index += body.len;
 
         const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29, "alignment must be comptime-known");
-        if (val.tag() == .generic_poison) {
+        if (val.isGenericPoison()) {
            break :blk null;
         }
         const alignment = @intCast(u32, val.toUnsignedInt(mod));
@@ -22611,7 +22611,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const addrspace_ty = try sema.getBuiltinType("AddressSpace");
         const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, "addrespace must be comptime-known");
-        if (val.tag() == .generic_poison) {
+        if (val.isGenericPoison()) {
            break :blk null;
         }
         break :blk val.toEnum(std.builtin.AddressSpace);
@@ -22635,7 +22635,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const ty = Type.initTag(.const_slice_u8);
         const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
-        if (val.tag() == .generic_poison) {
+        if (val.isGenericPoison()) {
            break :blk FuncLinkSection{ .generic = {} };
         }
         break :blk FuncLinkSection{ .explicit = try val.toAllocatedBytes(ty, sema.arena, sema.mod) };
@@ -22659,7 +22659,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const cc_ty = try sema.getBuiltinType("CallingConvention");
         const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, "calling convention must be comptime-known");
-        if (val.tag() == .generic_poison) {
+        if (val.isGenericPoison()) {
            break :blk null;
         }
         break :blk val.toEnum(std.builtin.CallingConvention);
@@ -31790,7 +31790,7 @@ fn resolveInferredErrorSet(
     // if ies declared by a inline function with generic return type, the return_type should be generic_poison,
     // because inline function does not create a new declaration, and the ies has been filled with analyzeCall,
     // so here we can simply skip this case.
-    if (ies_func_info.return_type.tag() == .generic_poison) {
+    if (ies_func_info.return_type.isGenericPoison()) {
         assert(ies_func_info.cc == .Inline);
     } else if (ies_func_info.return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) {
         if (ies_func_info.is_generic) {
@@ -32048,7 +32048,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
                 else => |e| return e,
             };
         };
-        if (field_ty.tag() == .generic_poison) {
+        if (field_ty.isGenericPoison()) {
             return error.GenericPoison;
         }
@@ -32442,7 +32442,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
             else => |e| return e,
         };
 
-        if (field_ty.tag() == .generic_poison) {
+        if (field_ty.isGenericPoison()) {
             return error.GenericPoison;
         }
diff --git a/src/type.zig b/src/type.zig
index 94fd4c2eafe5..9f14619c2c64 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -727,8 +727,8 @@ pub const Type = struct {
             const a_info = a.fnInfo();
             const b_info = b.fnInfo();
 
-            if (a_info.return_type.tag() != .generic_poison and
-                b_info.return_type.tag() != .generic_poison and
+            if (!a_info.return_type.isGenericPoison() and
+                !b_info.return_type.isGenericPoison() and
                 !eql(a_info.return_type, b_info.return_type, mod))
                 return false;
@@ -758,8 +758,8 @@ pub const Type = struct {
                 if (a_info.comptime_params[i] != b_info.comptime_params[i])
                     return false;
 
-                if (a_param_ty.tag() == .generic_poison) continue;
-                if (b_param_ty.tag() == .generic_poison) continue;
+                if (a_param_ty.isGenericPoison()) continue;
+                if (b_param_ty.isGenericPoison()) continue;
 
                 if (!eql(a_param_ty, b_param_ty, mod))
                     return false;
@@ -1131,7 +1131,7 @@ pub const Type = struct {
             std.hash.autoHash(hasher, std.builtin.TypeId.Fn);
 
             const fn_info = ty.fnInfo();
-            if (fn_info.return_type.tag() != .generic_poison) {
+            if (!fn_info.return_type.isGenericPoison()) {
                 hashWithHasher(fn_info.return_type, hasher, mod);
             }
             if (!fn_info.align_is_generic) {
@@ -1148,7 +1148,7 @@ pub const Type = struct {
             std.hash.autoHash(hasher, fn_info.param_types.len);
             for (fn_info.param_types, 0..) |param_ty, i| {
                 std.hash.autoHash(hasher, fn_info.paramIsComptime(i));
-                if (param_ty.tag() == .generic_poison) continue;
+                if (param_ty.isGenericPoison()) continue;
                 hashWithHasher(param_ty, hasher, mod);
             }
         },
@@ -2154,7 +2154,7 @@ pub const Type = struct {
                 if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) {
                     try writer.writeAll("noalias ");
                 };
-                if (param_ty.tag() == .generic_poison) {
+                if (param_ty.isGenericPoison()) {
                     try writer.writeAll("anytype");
                 } else {
                     try print(param_ty, writer, mod);
@@ -2175,7 +2175,7 @@ pub const Type = struct {
                 try writer.writeAll(@tagName(fn_info.cc));
                 try writer.writeAll(") ");
             }
-            if (fn_info.return_type.tag() == .generic_poison) {
+            if (fn_info.return_type.isGenericPoison()) {
                 try writer.writeAll("anytype");
             } else {
                 try print(fn_info.return_type, writer, mod);
@@ -6075,6 +6075,14 @@ pub const Type = struct {
         }
     }
 
+    pub fn isGenericPoison(ty: Type) bool {
+        return switch (ty.ip_index) {
+            .generic_poison_type => true,
+            .none => ty.tag() == .generic_poison,
+            else => false,
+        };
+    }
+
     /// This enum does not directly correspond to `std.builtin.TypeId` because
     /// it has extra enum tags in it, as a way of using less memory. For example,
     /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
diff --git a/src/value.zig b/src/value.zig
index 8c824b07205e..d89be35d85af 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -5416,6 +5416,14 @@ pub const Value = struct {
         return initPayload(&value_buffer.base);
     }
 
+    pub fn isGenericPoison(val: Value) bool {
+        return switch (val.ip_index) {
+            .generic_poison => true,
+            .none => val.tag() == .generic_poison,
+            else => false,
+        };
+    }
+
     /// This type is not copyable since it may contain pointers to its inner data.
     pub const Payload = struct {
         tag: Tag,
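Patch 005 is another purely mechanical pass: every `tag() == .generic_poison` comparison becomes a call to the new `isGenericPoison` helpers, which accept either representation while the two coexist. Side by side (illustration only):

    // Before: only legacy, tag-backed poison values were recognized.
    if (ty.tag() == .generic_poison) return error.GenericPoison;
    // After: interned poison (ip_index == .generic_poison_type) is handled too.
    if (ty.isGenericPoison()) return error.GenericPoison;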
From b125063dcfb95b470bc7830b188614361d3ba4cb Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 3 May 2023 12:30:06 -0700
Subject: [PATCH 006/205] InternPool: implement typeHasOnePossibleValue for
 simple_type

---
 src/Sema.zig | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
 src/type.zig | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 92 insertions(+), 2 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index 45da0927cda7..79fe7b2eb06f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -32776,7 +32776,52 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         .vector_type => @panic("TODO"),
         .optional_type => @panic("TODO"),
         .error_union_type => @panic("TODO"),
-        .simple_type => @panic("TODO"),
+        .simple_type => |t| switch (t) {
+            .f16,
+            .f32,
+            .f64,
+            .f80,
+            .f128,
+            .usize,
+            .isize,
+            .c_char,
+            .c_short,
+            .c_ushort,
+            .c_int,
+            .c_uint,
+            .c_long,
+            .c_ulong,
+            .c_longlong,
+            .c_ulonglong,
+            .c_longdouble,
+            .anyopaque,
+            .bool,
+            .type,
+            .anyerror,
+            .comptime_int,
+            .comptime_float,
+            .@"anyframe",
+            .enum_literal,
+            .atomic_order,
+            .atomic_rmw_op,
+            .calling_convention,
+            .address_space,
+            .float_mode,
+            .reduce_op,
+            .call_modifier,
+            .prefetch_options,
+            .export_options,
+            .extern_options,
+            .type_info,
+            => return null,
+
+            .void => return Value.void,
+            .noreturn => return Value.initTag(.unreachable_value),
+            .null => return Value.null,
+            .undefined => return Value.undef,
+
+            .generic_poison => return error.GenericPoison,
+        },
         .struct_type => @panic("TODO"),
         .simple_value => unreachable,
         .extern_func => unreachable,
diff --git a/src/type.zig b/src/type.zig
index 9f14619c2c64..a4f1f5174b41 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -5030,7 +5030,52 @@ pub const Type = struct {
         .vector_type => @panic("TODO"),
         .optional_type => @panic("TODO"),
         .error_union_type => @panic("TODO"),
-        .simple_type => @panic("TODO"),
+        .simple_type => |t| switch (t) {
+            .f16,
+            .f32,
+            .f64,
+            .f80,
+            .f128,
+            .usize,
+            .isize,
+            .c_char,
+            .c_short,
+            .c_ushort,
+            .c_int,
+            .c_uint,
+            .c_long,
+            .c_ulong,
+            .c_longlong,
+            .c_ulonglong,
+            .c_longdouble,
+            .anyopaque,
+            .bool,
+            .type,
+            .anyerror,
+            .comptime_int,
+            .comptime_float,
+            .@"anyframe",
+            .enum_literal,
+            .atomic_order,
+            .atomic_rmw_op,
+            .calling_convention,
+            .address_space,
+            .float_mode,
+            .reduce_op,
+            .call_modifier,
+            .prefetch_options,
+            .export_options,
+            .extern_options,
+            .type_info,
+            => return null,
+
+            .void => return Value.void,
+            .noreturn => return Value.initTag(.unreachable_value),
+            .null => return Value.null,
+            .undefined => return Value.undef,
+
+            .generic_poison => unreachable,
+        },
         .struct_type => @panic("TODO"),
         .simple_value => unreachable,
         .extern_func => unreachable,
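Patch 006 fills in the `simple_type` arm of `typeHasOnePossibleValue` twice, in Sema.zig and type.zig; during the transition the two switches are deliberately kept in lockstep. What the classification buys, as a hedged usage sketch (the function signature is from the hunk above; the `addConstant` follow-up is hypothetical):

    // Only void, noreturn, null, and undefined collapse to a single value;
    // asking for the one possible value of, say, bool yields null.
    if (try sema.typeHasOnePossibleValue(ty)) |opv| {
        return sema.addConstant(ty, opv); // hypothetical follow-up call
    }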
From d1887ab1dde6b92e9cec374890d4c425b42ad376 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 3 May 2023 12:42:09 -0700
Subject: [PATCH 007/205] InternPool: implement hasRuntimeBitsAdvanced for
 simple_type

---
 src/Sema.zig |  1 +
 src/type.zig | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 52 insertions(+), 2 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index 79fe7b2eb06f..4d713064a6b8 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -32821,6 +32821,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
             .undefined => return Value.undef,
 
             .generic_poison => return error.GenericPoison,
+            .var_args_param => unreachable,
         },
         .struct_type => @panic("TODO"),
         .simple_value => unreachable,
diff --git a/src/type.zig b/src/type.zig
index a4f1f5174b41..eaa240aa1e6d 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2440,7 +2440,55 @@ pub const Type = struct {
         .vector_type => @panic("TODO"),
         .optional_type => @panic("TODO"),
         .error_union_type => @panic("TODO"),
-        .simple_type => @panic("TODO"),
+        .simple_type => |t| return switch (t) {
+            .f16,
+            .f32,
+            .f64,
+            .f80,
+            .f128,
+            .usize,
+            .isize,
+            .c_char,
+            .c_short,
+            .c_ushort,
+            .c_int,
+            .c_uint,
+            .c_long,
+            .c_ulong,
+            .c_longlong,
+            .c_ulonglong,
+            .c_longdouble,
+            .bool,
+            .anyerror,
+            .@"anyframe",
+            .anyopaque,
+            .atomic_order,
+            .atomic_rmw_op,
+            .calling_convention,
+            .address_space,
+            .float_mode,
+            .reduce_op,
+            .call_modifier,
+            .prefetch_options,
+            .export_options,
+            .extern_options,
+            => true,
+
+            // These are false because they are comptime-only types.
+            .void,
+            .type,
+            .comptime_int,
+            .comptime_float,
+            .noreturn,
+            .null,
+            .undefined,
+            .enum_literal,
+            .type_info,
+            => false,
+
+            .generic_poison => unreachable,
+            .var_args_param => unreachable,
+        },
         .struct_type => @panic("TODO"),
         .simple_value => unreachable,
         .extern_func => unreachable,
@@ -2500,7 +2548,6 @@ pub const Type = struct {
             .@"anyframe",
             .anyopaque,
             .@"opaque",
-            .type_info,
             .error_set_single,
             .error_union,
             .error_set,
@@ -2545,6 +2592,7 @@ pub const Type = struct {
             .enum_literal,
             .empty_struct,
             .empty_struct_literal,
+            .type_info,
             // These are function *bodies*, not pointers.
             // Special exceptions have to be made when emitting functions due to
             // this returning false.
@@ -5075,6 +5123,7 @@ pub const Type = struct {
             .undefined => return Value.undef,
 
             .generic_poison => unreachable,
+            .var_args_param => unreachable,
         },
         .struct_type => @panic("TODO"),
         .simple_value => unreachable,
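Patch 007 applies the same treatment to `hasRuntimeBitsAdvanced`: runtime-representable simple types report true, comptime-only ones report false, and `.type_info` migrates from the legacy runtime-bits list into the comptime-only group. Illustrative consequences (hedged; `Type.bool` and `hasRuntimeBits(mod)` appear in the spirv hunks above, while the `comptime_int` constant is assumed to exist analogously):

    assert(Type.bool.hasRuntimeBits(mod)); // runtime-representable: one bit
    assert(!Type.comptime_int.hasRuntimeBits(mod)); // comptime-only: no bits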
@@ -5075,6 +5123,7 @@ pub const Type = struct { .undefined => return Value.undef, .generic_poison => unreachable, + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .simple_value => unreachable, From cdf6acba961648cc800027b3e0adb0a3593a610a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 12:46:56 -0700 Subject: [PATCH 008/205] InternPool: implement hasWellDefinedLayout for simple_type --- src/type.zig | 47 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/src/type.zig b/src/type.zig index eaa240aa1e6d..027f4432f42b 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2721,7 +2721,52 @@ pub const Type = struct { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| return switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .void, + => true, + + .anyerror, + .@"anyframe", + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + .generic_poison, + => false, + }, .struct_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, From 264292f430668652818b30fe6cf5d8b434530c84 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 13:01:10 -0700 Subject: [PATCH 009/205] InternPool: implement resolveTypeFields --- src/InternPool.zig | 24 +++++++- src/Sema.zig | 135 +++++++++++++++++++++++++++++++++++++-------- src/type.zig | 10 ++++ 3 files changed, 146 insertions(+), 23 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index eadaf0da5eeb..146a880493d0 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -78,7 +78,11 @@ pub const Key = union(enum) { }, struct_type: struct { fields_len: u32, - // TODO move Module.Struct data to here + // TODO move Module.Struct data to InternPool + }, + union_type: struct { + fields_len: u32, + // TODO move Module.Union data to InternPool }, pub const IntType = std.builtin.Type.Int; @@ -126,6 +130,10 @@ pub const Key = union(enum) { @panic("TODO"); } }, + .union_type => |union_type| { + _ = union_type; + @panic("TODO"); + }, } } @@ -195,6 +203,14 @@ pub const Key = union(enum) { @panic("TODO"); }, + + .union_type => |a_info| { + const b_info = b.union_type; + + _ = a_info; + _ = b_info; + @panic("TODO"); + }, } } @@ -208,6 +224,7 @@ pub const Key = union(enum) { .error_union_type, .simple_type, .struct_type, + .union_type, => return .type_type, .int => |x| return x.ty, @@ -978,6 +995,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(SimpleInternal.type_empty_struct), }); }, + + .union_type => |union_type| { + _ = union_type; + @panic("TODO"); + }, } return @intToEnum(Index, ip.items.len - 1); } diff --git a/src/Sema.zig b/src/Sema.zig index 4d713064a6b8..ceaae1fbc8b1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -31373,6 +31373,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union_type => @panic("TODO"), .simple_type => @panic("TODO"), .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value 
=> unreachable, .extern_func => unreachable, .int => unreachable, @@ -31660,30 +31661,118 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { } pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - try sema.resolveTypeFieldsStruct(ty, struct_obj); - return ty; - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - try sema.resolveTypeFieldsUnion(ty, union_obj); - return ty; + const mod = sema.mod; + + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + try sema.resolveTypeFieldsStruct(ty, struct_obj); + return ty; + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + try sema.resolveTypeFieldsUnion(ty, union_obj); + return ty; + }, + .type_info => return sema.getBuiltinType("Type"), + .extern_options => return sema.getBuiltinType("ExternOptions"), + .export_options => return sema.getBuiltinType("ExportOptions"), + .atomic_order => return sema.getBuiltinType("AtomicOrder"), + .atomic_rmw_op => return sema.getBuiltinType("AtomicRmwOp"), + .calling_convention => return sema.getBuiltinType("CallingConvention"), + .address_space => return sema.getBuiltinType("AddressSpace"), + .float_mode => return sema.getBuiltinType("FloatMode"), + .reduce_op => return sema.getBuiltinType("ReduceOp"), + .modifier => return sema.getBuiltinType("CallModifier"), + .prefetch_options => return sema.getBuiltinType("PrefetchOptions"), + + else => return ty, }, - .type_info => return sema.getBuiltinType("Type"), - .extern_options => return sema.getBuiltinType("ExternOptions"), - .export_options => return sema.getBuiltinType("ExportOptions"), - .atomic_order => return sema.getBuiltinType("AtomicOrder"), - .atomic_rmw_op => return sema.getBuiltinType("AtomicRmwOp"), - .calling_convention => return sema.getBuiltinType("CallingConvention"), - .address_space => return sema.getBuiltinType("AddressSpace"), - .float_mode => return sema.getBuiltinType("FloatMode"), - .reduce_op => return sema.getBuiltinType("ReduceOp"), - .modifier => return sema.getBuiltinType("CallModifier"), - .prefetch_options => return sema.getBuiltinType("PrefetchOptions"), - else => return ty, + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .single_const_pointer_to_comptime_int_type, + .const_slice_u8_type, + .anyerror_void_error_union_type, + .generic_poison_type, + .empty_struct_type, + => return ty, + + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .one => unreachable, + .one_usize => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + 
.null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + .type_info_type => return sema.getBuiltinType("Type"), + .extern_options_type => return sema.getBuiltinType("ExternOptions"), + .export_options_type => return sema.getBuiltinType("ExportOptions"), + .atomic_order_type => return sema.getBuiltinType("AtomicOrder"), + .atomic_rmw_op_type => return sema.getBuiltinType("AtomicRmwOp"), + .calling_convention_type => return sema.getBuiltinType("CallingConvention"), + .address_space_type => return sema.getBuiltinType("AddressSpace"), + .float_mode_type => return sema.getBuiltinType("FloatMode"), + .reduce_op_type => return sema.getBuiltinType("ReduceOp"), + .call_modifier_type => return sema.getBuiltinType("CallModifier"), + .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"), + + _ => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + else => return ty, + }, } } @@ -32824,6 +32913,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .var_args_param => unreachable, }, .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -33475,6 +33565,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union_type => @panic("TODO"), .simple_type => @panic("TODO"), .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, diff --git a/src/type.zig b/src/type.zig index 027f4432f42b..cafb1beefebe 100644 --- a/src/type.zig +++ b/src/type.zig @@ -43,6 +43,7 @@ pub const Type = struct { .optional_type => return .Optional, .error_union_type => return .ErrorUnion, .struct_type => return .Struct, + .union_type => return .Union, .simple_type => |s| switch (s) { .f16, .f32, @@ -2018,6 +2019,7 @@ pub const Type = struct { .error_union_type => @panic("TODO"), .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -2490,6 +2492,7 @@ pub const Type = struct { .var_args_param => unreachable, }, .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -2768,6 +2771,7 @@ pub const Type = struct { => false, }, .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -3083,6 +3087,7 @@ pub const Type = struct { .error_union_type => @panic("TODO"), .simple_type => @panic("TODO"), .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -3478,6 +3483,7 @@ pub const Type = struct { .error_union_type => @panic("TODO"), .simple_type => @panic("TODO"), .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -3816,6 +3822,7 @@ pub const Type = struct { .error_union_type => @panic("TODO"), .simple_type => @panic("TODO"), .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -4847,6 +4854,7 @@ pub const Type = struct { 
.error_union_type => @panic("TODO"), .simple_type => @panic("TODO"), .struct_type => unreachable, + .union_type => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -5171,6 +5179,7 @@ pub const Type = struct { .var_args_param => unreachable, }, .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -5373,6 +5382,7 @@ pub const Type = struct { .error_union_type => @panic("TODO"), .simple_type => @panic("TODO"), .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, From e77dede87e8cff2679485aecf0d3af146595db3a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 13:38:59 -0700 Subject: [PATCH 010/205] InternPool: implement typePtrOrOptionalPtrTy --- src/Sema.zig | 35 +++++++++++++++++++++++++++++++++-- src/type.zig | 20 ++++++++++++++++---- src/value.zig | 2 ++ 3 files changed, 51 insertions(+), 6 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index ceaae1fbc8b1..c39146ca5ae4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1898,10 +1898,14 @@ fn resolveMaybeUndefVal( inst: Air.Inst.Ref, ) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; - switch (val.tag()) { - .variable => return null, + switch (val.ip_index) { .generic_poison => return error.GenericPoison, else => return val, + .none => switch (val.tag()) { + .variable => return null, + .generic_poison => return error.GenericPoison, + else => return val, + }, } } @@ -33497,6 +33501,33 @@ fn typePtrOrOptionalPtrTy( buf: *Type.Payload.ElemType, ) !?Type { const mod = sema.mod; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => return null, + .C => return ptr_type.elem_type.toType(), + .One, .Many => return ty, + }, + .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice, .C => return null, + .Many, .One => { + if (ptr_type.is_allowzero) return null; + + // optionals of zero sized types behave like bools, not pointers + const payload_ty = o.payload_type.toType(); + if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) { + return null; + } + + return payload_ty; + }, + }, + else => return null, + }, + else => return null, + }; + switch (ty.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, diff --git a/src/type.zig b/src/type.zig index cafb1beefebe..8fd0ea6869a4 100644 --- a/src/type.zig +++ b/src/type.zig @@ -4319,8 +4319,20 @@ pub const Type = struct { /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. - pub fn isPtrLikeOptional(self: Type, mod: *const Module) bool { - switch (self.tag()) { + /// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. 
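+    /// Optionals of slices, C pointers, and allowzero pointers return false:
+    /// a slice is wider than a single address, and for the other two the
+    /// address 0 is already a legal pointer value.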
+ pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.size == .C, + .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice, .C => false, + .Many, .One => !ptr_type.is_allowzero, + }, + else => false, + }, + else => false, + }; + switch (ty.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, .c_const_pointer, @@ -4328,7 +4340,7 @@ pub const Type = struct { => return true, .optional => { - const child_ty = self.castTag(.optional).?.data; + const child_ty = ty.castTag(.optional).?.data; if (child_ty.zigTypeTag(mod) != .Pointer) return false; const info = child_ty.ptrInfo().data; switch (info.size) { @@ -4337,7 +4349,7 @@ pub const Type = struct { } }, - .pointer => return self.castTag(.pointer).?.data.size == .C, + .pointer => return ty.castTag(.pointer).?.data.size == .C, else => return false, } diff --git a/src/value.zig b/src/value.zig index d89be35d85af..7f0e6006f00e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -117,6 +117,8 @@ pub const Value = struct { int_big_negative, function, extern_fn, + /// A comptime-known pointer can point to the address of a global + /// variable. The child element value in this case will have this tag. variable, /// A wrapper for values which are comptime-known but should /// semantically be runtime-known. From aa1bb5517d57ae7540ce2c7a4315b2f242d1470c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 13:56:01 -0700 Subject: [PATCH 011/205] InternPool: implement isSinglePointer --- src/Sema.zig | 31 ++++++++++++++++--------------- src/codegen/c.zig | 2 +- src/codegen/llvm.zig | 4 ++-- src/type.zig | 33 ++++++++++++++++++++++----------- 4 files changed, 41 insertions(+), 29 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index c39146ca5ae4..14037d030e9b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2086,7 +2086,7 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { const mod = sema.mod; - const inner_ty = if (object_ty.isSinglePointer()) object_ty.childType() else object_ty; + const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType() else object_ty; if (inner_ty.zigTypeTag(mod) == .Optional) opt: { var buf: Type.Payload.ElemType = undefined; @@ -3412,8 +3412,9 @@ fn indexablePtrLen( src: LazySrcLoc, object: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const object_ty = sema.typeOf(object); - const is_pointer_to = object_ty.isSinglePointer(); + const is_pointer_to = object_ty.isSinglePointer(mod); const indexable_ty = if (is_pointer_to) object_ty.childType() else object_ty; try checkIndexable(sema, block, src, indexable_ty); return sema.fieldVal(block, src, object, "len", src); @@ -12764,12 +12765,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs), else => unreachable, }) |rhs_val| { - const lhs_sub_val = if (lhs_ty.isSinglePointer()) + const lhs_sub_val = if (lhs_ty.isSinglePointer(mod)) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? 
else lhs_val; - const rhs_sub_val = if (rhs_ty.isSinglePointer()) + const rhs_sub_val = if (rhs_ty.isSinglePointer(mod)) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val; @@ -13022,7 +13023,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { const final_len_including_sent = result_len + @boolToInt(lhs_info.sentinel != null); - const lhs_sub_val = if (lhs_ty.isSinglePointer()) + const lhs_sub_val = if (lhs_ty.isSinglePointer(mod)) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val; @@ -17588,7 +17589,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const elem_ty = blk: { const air_inst = try sema.resolveInst(extra.data.elem_type); const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| { - if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer()) { + if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(mod)) { try sema.errNote(block, elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{}); } return err; @@ -23902,7 +23903,7 @@ fn fieldVal( // Zig allows dereferencing a single pointer during field lookup. Note that // we don't actually need to generate the dereference some field lookups, like the // length of arrays and other comptime operations. - const is_pointer_to = object_ty.isSinglePointer(); + const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) object_ty.childType() @@ -24092,7 +24093,7 @@ fn fieldPtr( // Zig allows dereferencing a single pointer during field lookup. Note that // we don't actually need to generate the dereference some field lookups, like the // length of arrays and other comptime operations. - const is_pointer_to = object_ty.isSinglePointer(); + const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) object_ty.childType() @@ -25622,7 +25623,7 @@ fn coerceExtra( // *T to *[1]T single_item: { if (dest_info.size != .One) break :single_item; - if (!inst_ty.isSinglePointer()) break :single_item; + if (!inst_ty.isSinglePointer(mod)) break :single_item; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const ptr_elem_ty = inst_ty.childType(); const array_ty = dest_info.pointee_type; @@ -25639,7 +25640,7 @@ fn coerceExtra( // Coercions where the source is a single pointer to an array. 
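        // For example, *[3]u8 may become []const u8 or [*]const u8, and
        // *[3:0]u8 may additionally become [*:0]const u8.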
src_array_ptr: { - if (!inst_ty.isSinglePointer()) break :src_array_ptr; + if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const array_ty = inst_ty.childType(); if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr; @@ -25794,7 +25795,7 @@ fn coerceExtra( .One => switch (dest_info.pointee_type.zigTypeTag(mod)) { .Union => { // pointer to anonymous struct to pointer to union - if (inst_ty.isSinglePointer() and + if (inst_ty.isSinglePointer(mod) and inst_ty.childType().isAnonStruct() and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { @@ -25803,7 +25804,7 @@ fn coerceExtra( }, .Struct => { // pointer to anonymous struct to pointer to struct - if (inst_ty.isSinglePointer() and + if (inst_ty.isSinglePointer(mod) and inst_ty.childType().isAnonStruct() and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { @@ -25815,7 +25816,7 @@ fn coerceExtra( }, .Array => { // pointer to tuple to pointer to array - if (inst_ty.isSinglePointer() and + if (inst_ty.isSinglePointer(mod) and inst_ty.childType().isTuple() and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { @@ -25834,7 +25835,7 @@ fn coerceExtra( ); } - if (!inst_ty.isSinglePointer()) break :to_slice; + if (!inst_ty.isSinglePointer(mod)) break :to_slice; const inst_child_ty = inst_ty.childType(); if (!inst_child_ty.isTuple()) break :to_slice; @@ -30807,7 +30808,7 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) { if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { continue; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a87f37b1c9d8..f4daa56a6d16 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -3873,7 +3873,7 @@ fn airCmpOp( try reap(f, inst, &.{ data.lhs, data.rhs }); const rhs_ty = f.typeOf(data.rhs); - const need_cast = lhs_ty.isSinglePointer() or rhs_ty.isSinglePointer(); + const need_cast = lhs_ty.isSinglePointer(mod) or rhs_ty.isSinglePointer(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, writer, lhs_ty); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 2e42e8e3fc20..5bc06e4bfcf8 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -5929,7 +5929,7 @@ pub const FuncGen = struct { const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); // TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch - const ptr = if (ptr_ty.isSinglePointer()) ptr: { + const ptr = if (ptr_ty.isSinglePointer(mod)) ptr: { // If this is a single-item pointer to an array, we need another index in the GEP. const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); @@ -5962,7 +5962,7 @@ pub const FuncGen = struct { if (elem_ptr.ptrInfo().data.vector_index != .none) return base_ptr; const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); - if (ptr_ty.isSinglePointer()) { + if (ptr_ty.isSinglePointer(mod)) { // If this is a single-item pointer to an array, we need another index in the GEP. 
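            // GEP with indices [0, rhs]: the leading 0 offsets zero elements
            // from the base pointer, and rhs then indexes into the array type.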
const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs }; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); diff --git a/src/type.zig b/src/type.zig index 8fd0ea6869a4..934bfd35ca2e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -4039,19 +4039,25 @@ pub const Type = struct { } } - pub fn isSinglePointer(self: Type) bool { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, - .inferred_alloc_const, - .inferred_alloc_mut, - => true, + pub fn isSinglePointer(ty: Type, mod: *const Module) bool { + switch (ty.ip_index) { + .none => return switch (ty.tag()) { + .single_const_pointer, + .single_mut_pointer, + .single_const_pointer_to_comptime_int, + .inferred_alloc_const, + .inferred_alloc_mut, + => true, - .pointer => self.castTag(.pointer).?.data.size == .One, + .pointer => ty.castTag(.pointer).?.data.size == .One, - else => false, - }; + else => false, + }, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_info| ptr_info.size == .One, + else => false, + }, + } } /// Asserts `ty` is a pointer. @@ -6142,6 +6148,11 @@ pub const Type = struct { } pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + else => return null, + }; switch (ty.tag()) { .enum_full, .enum_nonexhaustive => { const enum_full = ty.cast(Payload.EnumFull).?.data; From 4cd8a40b3b34d4e68853088dd637a9da9b6a8891 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 16:07:36 -0700 Subject: [PATCH 012/205] stage2: move float types to InternPool --- src/Sema.zig | 142 ++++++++---- src/arch/wasm/CodeGen.zig | 2 +- src/codegen/c/type.zig | 14 +- src/codegen/llvm.zig | 10 +- src/type.zig | 477 ++++++++++++++++++++++---------------- src/value.zig | 14 +- 6 files changed, 403 insertions(+), 256 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 14037d030e9b..f93ceb19e6b5 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5138,7 +5138,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins defer tracy.end(); const int = sema.code.instructions.items(.data)[inst].int; - return sema.addIntUnsigned(Type.initTag(.comptime_int), int); + return sema.addIntUnsigned(Type.comptime_int, int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5154,7 +5154,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
@memcpy(mem.sliceAsBytes(limbs), limb_bytes); return sema.addConstant( - Type.initTag(.comptime_int), + Type.comptime_int, try Value.Tag.int_big_positive.create(arena, limbs), ); } @@ -5164,7 +5164,7 @@ fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const arena = sema.arena; const number = sema.code.instructions.items(.data)[inst].float; return sema.addConstant( - Type.initTag(.comptime_float), + Type.comptime_float, try Value.Tag.float_64.create(arena, number), ); } @@ -5176,7 +5176,7 @@ fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); return sema.addConstant( - Type.initTag(.comptime_float), + Type.comptime_float, try Value.Tag.float_128.create(arena, number), ); } @@ -15152,8 +15152,8 @@ fn zirAsm( const uncasted_arg = try sema.resolveInst(input.data.operand); const uncasted_arg_ty = sema.typeOf(uncasted_arg); switch (uncasted_arg_ty.zigTypeTag(mod)) { - .ComptimeInt => arg.* = try sema.coerce(block, Type.initTag(.usize), uncasted_arg, src), - .ComptimeFloat => arg.* = try sema.coerce(block, Type.initTag(.f64), uncasted_arg, src), + .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src), + .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src), else => { arg.* = uncasted_arg; try sema.queueFullTypeResolution(uncasted_arg_ty); @@ -31369,14 +31369,59 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => return false, + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => false, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .@"anyframe", + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -31409,12 +31454,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .anyopaque, .bool, .void, @@ -31455,7 +31494,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .single_const_pointer_to_comptime_int, .type, .comptime_int, - .comptime_float, .enum_literal, .type_info, .function, @@ -32926,14 +32964,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }; switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, .comptime_int, - .comptime_float, .u1, .u8, .i8, @@ -33193,19 +33224,12 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { 
.c_ulong => return .c_ulong_type, .c_longlong => return .c_longlong_type, .c_ulonglong => return .c_ulonglong_type, - .c_longdouble => return .c_longdouble_type, - .f16 => return .f16_type, - .f32 => return .f32_type, - .f64 => return .f64_type, - .f80 => return .f80_type, - .f128 => return .f128_type, .anyopaque => return .anyopaque_type, .bool => return .bool_type, .void => return .void_type, .type => return .type_type, .anyerror => return .anyerror_type, .comptime_int => return .comptime_int_type, - .comptime_float => return .comptime_float_type, .noreturn => return .noreturn_type, .@"anyframe" => return .anyframe_type, .null => return .null_type, @@ -33595,7 +33619,52 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| return switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .@"anyframe", + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -33628,20 +33697,12 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .anyopaque, .bool, .void, .anyerror, .noreturn, .@"anyframe", - .null, - .undefined, .atomic_order, .atomic_rmw_op, .calling_convention, @@ -33674,8 +33735,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .single_const_pointer_to_comptime_int, .type, .comptime_int, - .comptime_float, .enum_literal, + .null, + .undefined, .type_info, .function, => true, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index cd61eaf1fbdb..bbba43d26578 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3594,7 +3594,7 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn const mod = func.bin_file.base.options.module.?; // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand; - if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand; + if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand; if (wanted_ty.bitSize(mod) > 64) return operand; assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod))); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 5064b84b1dfe..b964d16bd958 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1412,13 +1412,13 @@ pub const CType = extern union { .Bool => self.init(.bool), - .Float => self.init(switch (ty.tag()) { - .f16 => .zig_f16, - .f32 => .zig_f32, - .f64 => .zig_f64, - .f80 => .zig_f80, - .f128 => .zig_f128, - .c_longdouble => .zig_c_longdouble, + .Float => self.init(switch (ty.ip_index) { + .f16_type => .zig_f16, + .f32_type => .zig_f32, + .f64_type => .zig_f64, + 
.f80_type => .zig_f80, + .f128_type => .zig_f128, + .c_longdouble_type => .zig_c_longdouble, else => unreachable, }), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5bc06e4bfcf8..ce78b06f2e72 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -10932,7 +10932,7 @@ const ParamTypeIterator = struct { .riscv32, .riscv64 => { it.zig_index += 1; it.llvm_index += 1; - if (ty.tag() == .f16) { + if (ty.ip_index == .f16_type) { return .as_u16; } switch (riscv_c_abi.classifyType(ty, mod)) { @@ -11264,10 +11264,10 @@ fn backendSupportsF128(target: std.Target) bool { /// LLVM does not support all relevant intrinsics for all targets, so we /// may need to manually generate a libc call fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool { - return switch (scalar_ty.tag()) { - .f16 => backendSupportsF16(target), - .f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target), - .f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target), + return switch (scalar_ty.ip_index) { + .f16_type => backendSupportsF16(target), + .f80_type => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target), + .f128_type => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target), else => true, }; } diff --git a/src/type.zig b/src/type.zig index 934bfd35ca2e..205a7327102c 100644 --- a/src/type.zig +++ b/src/type.zig @@ -50,6 +50,7 @@ pub const Type = struct { .f64, .f80, .f128, + .c_longdouble, => return .Float, .usize, @@ -63,7 +64,6 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, => return .Int, .anyopaque => return .Opaque, @@ -134,14 +134,6 @@ pub const Type = struct { .c_ulonglong, => return .Int, - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - => return .Float, - .error_set, .error_set_single, .anyerror, @@ -154,7 +146,6 @@ pub const Type = struct { .void => return .Void, .type => return .Type, .comptime_int => return .ComptimeInt, - .comptime_float => return .ComptimeFloat, .noreturn => return .NoReturn, .null => return .Null, .undefined => return .Undefined, @@ -618,10 +609,13 @@ pub const Type = struct { } pub fn eql(a: Type, b: Type, mod: *Module) bool { - // As a shortcut, if the small tags / addresses match, we're done. if (a.ip_index != .none or b.ip_index != .none) { + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. return a.ip_index == b.ip_index; } + // As a shortcut, if the small tags / addresses match, we're done. if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { @@ -640,18 +634,10 @@ pub const Type = struct { .c_longlong, .c_ulonglong, - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .bool, .void, .type, .comptime_int, - .comptime_float, .noreturn, .null, .undefined, @@ -1018,7 +1004,11 @@ pub const Type = struct { pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { if (ty.ip_index != .none) { - return mod.intern_pool.indexToKey(ty.ip_index).hashWithHasher(hasher); + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. 
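+            // Equal keys always intern to the same index, so hashing the
+            // index is equivalent to hashing the full key.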
+ std.hash.autoHash(hasher, ty.ip_index); + return; } switch (ty.tag()) { .generic_poison => unreachable, @@ -1039,22 +1029,10 @@ pub const Type = struct { std.hash.autoHash(hasher, ty_tag); }, - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - => |ty_tag| { - std.hash.autoHash(hasher, std.builtin.TypeId.Float); - std.hash.autoHash(hasher, ty_tag); - }, - .bool => std.hash.autoHash(hasher, std.builtin.TypeId.Bool), .void => std.hash.autoHash(hasher, std.builtin.TypeId.Void), .type => std.hash.autoHash(hasher, std.builtin.TypeId.Type), .comptime_int => std.hash.autoHash(hasher, std.builtin.TypeId.ComptimeInt), - .comptime_float => std.hash.autoHash(hasher, std.builtin.TypeId.ComptimeFloat), .noreturn => std.hash.autoHash(hasher, std.builtin.TypeId.NoReturn), .null => std.hash.autoHash(hasher, std.builtin.TypeId.Null), .undefined => std.hash.autoHash(hasher, std.builtin.TypeId.Undefined), @@ -1378,19 +1356,12 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .void, .type, .anyerror, .comptime_int, - .comptime_float, .noreturn, .null, .undefined, @@ -1671,20 +1642,13 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .void, .type, .anyerror, .@"anyframe", .comptime_int, - .comptime_float, .noreturn, => return writer.writeAll(@tagName(t)), @@ -2067,20 +2031,13 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .void, .type, .anyerror, .@"anyframe", .comptime_int, - .comptime_float, .noreturn, => try writer.writeAll(@tagName(t)), @@ -2353,6 +2310,7 @@ pub const Type = struct { } pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { + if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { .u1 => return Value.initTag(.u1_type), .u8 => return Value.initTag(.u8_type), @@ -2375,20 +2333,13 @@ pub const Type = struct { .c_ulong => return Value.initTag(.c_ulong_type), .c_longlong => return Value.initTag(.c_longlong_type), .c_ulonglong => return Value.initTag(.c_ulonglong_type), - .c_longdouble => return Value.initTag(.c_longdouble_type), .anyopaque => return Value.initTag(.anyopaque_type), - .f16 => return Value.initTag(.f16_type), - .f32 => return Value.initTag(.f32_type), - .f64 => return Value.initTag(.f64_type), - .f80 => return Value.initTag(.f80_type), - .f128 => return Value.initTag(.f128_type), .bool => return Value.initTag(.bool_type), .void => return Value.initTag(.void_type), .type => return Value.initTag(.type_type), .anyerror => return Value.initTag(.anyerror_type), .@"anyframe" => return Value.initTag(.anyframe_type), .comptime_int => return Value.initTag(.comptime_int_type), - .comptime_float => return Value.initTag(.comptime_float_type), .noreturn => return Value.initTag(.noreturn_type), .null => return Value.initTag(.null_type), .undefined => return Value.initTag(.undefined_type), @@ -2522,12 +2473,6 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .anyerror, .const_slice_u8, @@ -2588,7 +2533,6 @@ pub const Type = struct { .void, .type, .comptime_int, - .comptime_float, .noreturn, .null, .undefined, @@ -2801,12 +2745,6 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .void, .manyptr_u8, @@ -2852,7 
+2790,6 @@ pub const Type = struct { .generic_poison, .type, .comptime_int, - .comptime_float, .enum_literal, .type_info, // These are function bodies, not function pointers. @@ -3085,7 +3022,74 @@ pub const Type = struct { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .anyopaque, + => return AbiAlignmentAdvanced{ .scalar = 1 }, + + .usize, + .isize, + .export_options, + .extern_options, + .@"anyframe", + => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + + .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, + .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, + .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, + .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, + .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, + .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, + .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, + .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, + .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, + .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + + .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, + .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) }, + .f64 => switch (target.c_type_bit_size(.double)) { + 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) }, + else => return AbiAlignmentAdvanced{ .scalar = 8 }, + }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + else => { + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, + }; + return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; + }, + }, + .f128 => switch (target.c_type_bit_size(.longdouble)) { + 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + else => return AbiAlignmentAdvanced{ .scalar = 16 }, + }, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return AbiAlignmentAdvanced{ .scalar = 2 }, + + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => return AbiAlignmentAdvanced{ .scalar = 0 }, + + .noreturn => unreachable, + .generic_poison => unreachable, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -3158,28 +3162,6 @@ pub const Type = struct { .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, - .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - - .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, - .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) }, - .f64 => 
switch (target.c_type_bit_size(.double)) { - 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) }, - else => return AbiAlignmentAdvanced{ .scalar = 8 }, - }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - else => { - const u80_ty: Type = .{ - .ip_index = .u80_type, - .legacy = undefined, - }; - return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; - }, - }, - .f128 => switch (target.c_type_bit_size(.longdouble)) { - 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - else => return AbiAlignmentAdvanced{ .scalar = 16 }, - }, // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, @@ -3366,7 +3348,6 @@ pub const Type = struct { .empty_struct_literal, .type, .comptime_int, - .comptime_float, .null, .undefined, .enum_literal, @@ -3481,7 +3462,69 @@ pub const Type = struct { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + => return AbiSizeAdvanced{ .scalar = 1 }, + + .f16 => return AbiSizeAdvanced{ .scalar = 2 }, + .f32 => return AbiSizeAdvanced{ .scalar = 4 }, + .f64 => return AbiSizeAdvanced{ .scalar = 8 }, + .f128 => return AbiSizeAdvanced{ .scalar = 16 }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + else => { + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, + }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; + }, + }, + + .usize, + .isize, + .@"anyframe", + => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + + .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, + .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, + .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, + .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, + .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, + .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, + .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, + .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, + .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, + .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + + .anyopaque, + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return AbiSizeAdvanced{ .scalar = 0 }, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return AbiSizeAdvanced{ .scalar = 2 }, + + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + + .type_info => unreachable, + .noreturn => unreachable, + .generic_poison => unreachable, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -3506,7 +3549,6 @@ pub const Type = struct { 
.anyopaque, .type, .comptime_int, - .comptime_float, .null, .undefined, .enum_literal, @@ -3661,22 +3703,6 @@ pub const Type = struct { .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - - .f16 => return AbiSizeAdvanced{ .scalar = 2 }, - .f32 => return AbiSizeAdvanced{ .scalar = 4 }, - .f64 => return AbiSizeAdvanced{ .scalar = 8 }, - .f128 => return AbiSizeAdvanced{ .scalar = 16 }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - else => { - const u80_ty: Type = .{ - .ip_index = .u80_type, - .legacy = undefined, - }; - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; - }, - }, // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, @@ -3820,7 +3846,57 @@ pub const Type = struct { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, + + .usize, + .isize, + .@"anyframe", + => return target.cpu.arch.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return 16, + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + + .atomic_order => unreachable, // missing call to resolveTypeFields + .atomic_rmw_op => unreachable, // missing call to resolveTypeFields + .calling_convention => unreachable, // missing call to resolveTypeFields + .address_space => unreachable, // missing call to resolveTypeFields + .float_mode => unreachable, // missing call to resolveTypeFields + .reduce_op => unreachable, // missing call to resolveTypeFields + .call_modifier => unreachable, // missing call to resolveTypeFields + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + .type_info => unreachable, // missing call to resolveTypeFields + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -3836,7 +3912,6 @@ pub const Type = struct { .anyopaque => unreachable, .type => unreachable, .comptime_int => unreachable, - .comptime_float => unreachable, .noreturn => unreachable, .null => unreachable, .undefined => unreachable, 
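
In other words, a simple type's bit width is either a fixed constant, the
target's pointer width, or a C ABI query. A rough sketch covering a few
representative tags, under the same `InternPool.SimpleType` assumption as
before:

const std = @import("std");
const InternPool = @import("InternPool.zig");

// Sketch only -- a few representative results of the bitSize switch above.
fn simpleTypeBitSize(t: InternPool.SimpleType, target: std.Target) u64 {
    return switch (t) {
        .bool => 1,
        .f16 => 16,
        .f80 => 80,
        .f128 => 128,
        // Pointer-sized integers follow the target architecture.
        .usize, .isize => target.cpu.arch.ptrBitWidth(),
        // C ABI types are delegated to the target description.
        .c_int => target.c_type_bit_size(.int),
        .c_longdouble => target.c_type_bit_size(.longdouble),
        // Comptime-only tags, and std.builtin types that still need
        // resolveTypeFields, are unreachable here.
        else => unreachable,
    };
}

Note that bit size and ABI size can disagree: on x86_64-linux, f80 reports 80
bits here, while the abiSize logic earlier rounds its storage up to 16 bytes.
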
@@ -3852,12 +3927,11 @@ pub const Type = struct { .void => return 0, .bool, .u1 => return 1, .u8, .i8 => return 8, - .i16, .u16, .f16 => return 16, + .i16, .u16 => return 16, .u29 => return 29, - .i32, .u32, .f32 => return 32, - .i64, .u64, .f64 => return 64, - .f80 => return 80, - .u128, .i128, .f128 => return 128, + .i32, .u32 => return 32, + .i64, .u64 => return 64, + .u128, .i128 => return 128, .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -3975,7 +4049,6 @@ pub const Type = struct { .c_ulong => return target.c_type_bit_size(.ulong), .c_longlong => return target.c_type_bit_size(.longlong), .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), .error_set, .error_set_single, @@ -4950,14 +5023,14 @@ pub const Type = struct { } /// Returns `false` for `comptime_float`. - pub fn isRuntimeFloat(self: Type) bool { - return switch (self.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, + pub fn isRuntimeFloat(ty: Type) bool { + return switch (ty.ip_index) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, => true, else => false, @@ -4965,15 +5038,15 @@ pub const Type = struct { } /// Returns `true` for `comptime_float`. - pub fn isAnyFloat(self: Type) bool { - return switch (self.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_float, + pub fn isAnyFloat(ty: Type) bool { + return switch (ty.ip_index) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_float_type, => true, else => false, @@ -4982,14 +5055,14 @@ pub const Type = struct { /// Asserts the type is a fixed-size float or comptime_float. /// Returns 128 for comptime_float types. - pub fn floatBits(self: Type, target: Target) u16 { - return switch (self.tag()) { - .f16 => 16, - .f32 => 32, - .f64 => 64, - .f80 => 80, - .f128, .comptime_float => 128, - .c_longdouble => target.c_type_bit_size(.longdouble), + pub fn floatBits(ty: Type, target: Target) u16 { + return switch (ty.ip_index) { + .f16_type => 16, + .f32_type => 32, + .f64_type => 64, + .f80_type => 80, + .f128_type, .comptime_float_type => 128, + .c_longdouble_type => target.c_type_bit_size(.longdouble), else => unreachable, }; @@ -5094,14 +5167,7 @@ pub const Type = struct { else => false, }; return switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, .comptime_int, - .comptime_float, .u1, .u8, .i8, @@ -5205,14 +5271,7 @@ pub const Type = struct { }; while (true) switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, .comptime_int, - .comptime_float, .u1, .u8, .i8, @@ -5391,14 +5450,59 @@ pub const Type = struct { /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. 
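    /// "Comptime-only" means every value of the type must be comptime-known;
    /// such a type never has a runtime representation.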
pub fn comptimeOnly(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => return false, + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => false, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .@"anyframe", + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -5431,20 +5535,12 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .anyopaque, .bool, .void, .anyerror, .noreturn, .@"anyframe", - .null, - .undefined, .atomic_order, .atomic_rmw_op, .calling_convention, @@ -5477,11 +5573,12 @@ pub const Type = struct { .single_const_pointer_to_comptime_int, .type, .comptime_int, - .comptime_float, .enum_literal, .type_info, // These are function bodies, not function pointers. .function, + .null, + .undefined, => true, .inferred_alloc_mut => unreachable, @@ -6286,19 +6383,12 @@ pub const Type = struct { c_ulong, c_longlong, c_ulonglong, - c_longdouble, - f16, - f32, - f64, - f80, - f128, anyopaque, bool, void, type, anyerror, comptime_int, - comptime_float, noreturn, @"anyframe", null, @@ -6377,7 +6467,6 @@ pub const Type = struct { pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; pub fn Type(comptime t: Tag) type { - // Keep in sync with tools/stage2_pretty_printers_common.py return switch (t) { .u1, .u8, @@ -6402,19 +6491,12 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .anyopaque, .bool, .void, .type, .anyerror, .comptime_int, - .comptime_float, .noreturn, .enum_literal, .null, @@ -6781,16 +6863,17 @@ pub const Type = struct { pub const @"i32" = initTag(.i32); pub const @"i64" = initTag(.i64); - pub const @"f16" = initTag(.f16); - pub const @"f32" = initTag(.f32); - pub const @"f64" = initTag(.f64); - pub const @"f80" = initTag(.f80); - pub const @"f128" = initTag(.f128); + pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined }; + pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined }; + pub const @"f64": Type = .{ .ip_index = .f64_type, .legacy = undefined }; + pub const @"f80": Type = .{ .ip_index = .f80_type, .legacy = undefined }; + pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined }; pub const @"bool" = initTag(.bool); pub const @"usize" = initTag(.usize); pub const @"isize" = initTag(.isize); - pub const @"comptime_int" = initTag(.comptime_int); + pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type, .legacy = undefined }; + pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined }; pub const @"void" 
= initTag(.void); pub const @"type" = initTag(.type); pub const @"anyerror" = initTag(.anyerror); @@ -6798,6 +6881,8 @@ pub const Type = struct { pub const @"null" = initTag(.null); pub const @"noreturn" = initTag(.noreturn); + pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; + pub const err_int = Type.u16; pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { diff --git a/src/value.zig b/src/value.zig index 7f0e6006f00e..8912209d5e9f 100644 --- a/src/value.zig +++ b/src/value.zig @@ -974,19 +974,19 @@ pub const Value = struct { .c_ulong_type => Type.initTag(.c_ulong), .c_longlong_type => Type.initTag(.c_longlong), .c_ulonglong_type => Type.initTag(.c_ulonglong), - .c_longdouble_type => Type.initTag(.c_longdouble), - .f16_type => Type.initTag(.f16), - .f32_type => Type.initTag(.f32), - .f64_type => Type.initTag(.f64), - .f80_type => Type.initTag(.f80), - .f128_type => Type.initTag(.f128), + .c_longdouble_type => Type.c_longdouble, + .f16_type => Type.f16, + .f32_type => Type.f32, + .f64_type => Type.f64, + .f80_type => Type.f80, + .f128_type => Type.f128, .anyopaque_type => Type.initTag(.anyopaque), .bool_type => Type.initTag(.bool), .void_type => Type.initTag(.void), .type_type => Type.initTag(.type), .anyerror_type => Type.initTag(.anyerror), .comptime_int_type => Type.initTag(.comptime_int), - .comptime_float_type => Type.initTag(.comptime_float), + .comptime_float_type => Type.comptime_float, .noreturn_type => Type.initTag(.noreturn), .null_type => Type.initTag(.null), .undefined_type => Type.initTag(.undefined), From bcd4bb8afbea84d86fd8758b581b141e7086b16b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 16:33:16 -0700 Subject: [PATCH 013/205] stage2: move named int types to InternPool --- src/Sema.zig | 51 +----- src/arch/aarch64/CodeGen.zig | 6 +- src/arch/arm/CodeGen.zig | 10 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +- src/codegen.zig | 2 +- src/codegen/c/type.zig | 24 +-- src/codegen/spirv.zig | 2 +- src/type.zig | 301 +++++++---------------------------- src/value.zig | 64 ++++---- 10 files changed, 121 insertions(+), 345 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index f93ceb19e6b5..d03460385e44 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6448,7 +6448,7 @@ fn checkCallArgumentCount( .Optional => { var buf: Type.Payload.ElemType = undefined; const opt_child = callee_ty.optionalChild(&buf); - if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer() and + if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and opt_child.childType().zigTypeTag(mod) == .Fn)) { const msg = msg: { @@ -31421,6 +31421,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -31443,17 +31445,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -31798,6 +31789,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .bool_false => unreachable, .empty_struct => unreachable, .generic_poison => unreachable, + .var_args_param_type => unreachable, .type_info_type => return sema.getBuiltinType("Type"), .extern_options_type => return 
sema.getBuiltinType("ExternOptions"), @@ -32977,17 +32969,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .type, .anyerror, @@ -33213,17 +33194,6 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .i64 => return .i64_type, .u128 => return .u128_type, .i128 => return .i128_type, - .usize => return .usize_type, - .isize => return .isize_type, - .c_char => return .c_char_type, - .c_short => return .c_short_type, - .c_ushort => return .c_ushort_type, - .c_int => return .c_int_type, - .c_uint => return .c_uint_type, - .c_long => return .c_long_type, - .c_ulong => return .c_ulong_type, - .c_longlong => return .c_longlong_type, - .c_ulonglong => return .c_ulonglong_type, .anyopaque => return .anyopaque_type, .bool => return .bool_type, .void => return .void_type, @@ -33664,6 +33634,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -33686,17 +33658,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 284663327503..467186619709 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4326,7 +4326,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; @@ -4353,7 +4353,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const got_addr = p9.bases.data; const got_index = decl_block.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); + try self.genSetReg(Type.usize, .x30, .{ .memory = fn_got_addr }); } else unreachable; _ = try self.addInst(.{ @@ -5968,7 +5968,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index eb8cfa97076a..ca4a3826aa8e 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4308,7 +4308,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try 
atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |_| { unreachable; // unsupported architecture for MachO } else { @@ -4326,7 +4326,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); - try self.genSetReg(Type.initTag(.usize), .lr, mcv); + try self.genSetReg(Type.usize, .lr, mcv); } // TODO: add Instruction.supportedOn @@ -5694,7 +5694,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (extra_offset) { const offset = if (off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off })); _ = try self.addInst(.{ .tag = tag, @@ -5710,7 +5710,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } else { const offset = if (off <= math.maxInt(u12)) blk: { break :blk Instruction.Offset.imm(@intCast(u12, off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none); _ = try self.addInst(.{ .tag = tag, @@ -5916,7 +5916,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - 4, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 4ab798fe9c50..488b937141d0 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1749,7 +1749,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jalr, .data = .{ .i_type = .{ diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index e79a216315ca..343cc2f90edd 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -883,7 +883,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1352,7 
+1352,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file)); } else unreachable; - try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jmpl, diff --git a/src/codegen.zig b/src/codegen.zig index 6846bebe6b17..295409781e90 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -371,7 +371,7 @@ pub fn generateSymbol( // generate length switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.initTag(.usize), + .ty = Type.usize, .val = slice.len, }, code, debug_output, reloc_info)) { .ok => {}, diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index b964d16bd958..d2487536701b 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1359,18 +1359,18 @@ pub const CType = extern union { self.* = undefined; if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) self.init(.void) - else if (ty.isAbiInt(mod)) switch (ty.tag()) { - .usize => self.init(.uintptr_t), - .isize => self.init(.intptr_t), - .c_char => self.init(.char), - .c_short => self.init(.short), - .c_ushort => self.init(.@"unsigned short"), - .c_int => self.init(.int), - .c_uint => self.init(.@"unsigned int"), - .c_long => self.init(.long), - .c_ulong => self.init(.@"unsigned long"), - .c_longlong => self.init(.@"long long"), - .c_ulonglong => self.init(.@"unsigned long long"), + else if (ty.isAbiInt(mod)) switch (ty.ip_index) { + .usize_type => self.init(.uintptr_t), + .isize_type => self.init(.intptr_t), + .c_char_type => self.init(.char), + .c_short_type => self.init(.short), + .c_ushort_type => self.init(.@"unsigned short"), + .c_int_type => self.init(.int), + .c_uint_type => self.init(.@"unsigned int"), + .c_long_type => self.init(.long), + .c_ulong_type => self.init(.@"unsigned long"), + .c_longlong_type => self.init(.@"long long"), + .c_ulonglong_type => self.init(.@"unsigned long long"), else => switch (tagFromIntInfo(ty.intInfo(mod))) { .void => unreachable, else => |t| self.init(t), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 41abbde1a0f1..90c2d93458ce 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2499,7 +2499,7 @@ pub const DeclGen = struct { const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. const elem_ty_ref = try self.resolveType(elem_ty, .direct); const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); - if (ptr_ty.isSinglePointer()) { + if (ptr_ty.isSinglePointer(mod)) { // Pointer-to-array. In this case, the resulting pointer is not of the same type // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain. return try self.accessChain(elem_ptr_ty_ref, ptr_id, &.{index_id}); diff --git a/src/type.zig b/src/type.zig index 205a7327102c..4e8d0b9e2076 100644 --- a/src/type.zig +++ b/src/type.zig @@ -121,17 +121,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, => return .Int, .error_set, @@ -621,19 +610,6 @@ pub const Type = struct { switch (a.tag()) { .generic_poison => unreachable, - // Detect that e.g. u64 != usize, even if the bits match on a particular target. 
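
The comment being removed here ("u64 != usize, even if the bits match") is the crux of this patch: the named integer types have target-dependent layout, which is why the intInfo hunk further down resolves their widths through target queries rather than a fixed-width tag. A minimal standalone sketch of that resolution, using the same std.Target helpers the diff itself calls (the function name namedIntBits is illustrative, not compiler API):

    const std = @import("std");
    const builtin = @import("builtin");

    // usize/isize follow the target's pointer width; the C named types
    // follow the target's C ABI, so none of them can be a fixed-width tag.
    fn namedIntBits(target: std.Target, name: enum { usize, c_int }) u16 {
        return switch (name) {
            .usize => target.ptrBitWidth(),
            .c_int => target.c_type_bit_size(.int),
        };
    }

    test "usize width follows the target pointer width" {
        try std.testing.expectEqual(builtin.target.ptrBitWidth(), namedIntBits(builtin.target, .usize));
    }

This is also why two named types stay distinct from their fixed-width cousins for equality purposes: the name, not the resolved bit count, is the identity.
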
- .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .bool, .void, .type, @@ -1013,22 +989,6 @@ pub const Type = struct { switch (ty.tag()) { .generic_poison => unreachable, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - => |ty_tag| { - std.hash.autoHash(hasher, std.builtin.TypeId.Int); - std.hash.autoHash(hasher, ty_tag); - }, - .bool => std.hash.autoHash(hasher, std.builtin.TypeId.Bool), .void => std.hash.autoHash(hasher, std.builtin.TypeId.Void), .type => std.hash.autoHash(hasher, std.builtin.TypeId.Type), @@ -1345,17 +1305,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -1631,17 +1580,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -2020,17 +1958,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -2322,17 +2249,6 @@ pub const Type = struct { .i32 => return Value.initTag(.i32_type), .u64 => return Value.initTag(.u64_type), .i64 => return Value.initTag(.i64_type), - .usize => return Value.initTag(.usize_type), - .isize => return Value.initTag(.isize_type), - .c_char => return Value.initTag(.c_char_type), - .c_short => return Value.initTag(.c_short_type), - .c_ushort => return Value.initTag(.c_ushort_type), - .c_int => return Value.initTag(.c_int_type), - .c_uint => return Value.initTag(.c_uint_type), - .c_long => return Value.initTag(.c_long_type), - .c_ulong => return Value.initTag(.c_ulong_type), - .c_longlong => return Value.initTag(.c_longlong_type), - .c_ulonglong => return Value.initTag(.c_ulonglong_type), .anyopaque => return Value.initTag(.anyopaque_type), .bool => return Value.initTag(.bool_type), .void => return Value.initTag(.void_type), @@ -2462,17 +2378,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .anyerror, .const_slice_u8, @@ -2713,6 +2618,8 @@ pub const Type = struct { .type_info, .generic_poison, => false, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -2734,17 +2641,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .void, .manyptr_u8, @@ -3040,7 +2936,7 @@ pub const Type = struct { .export_options, .extern_options, .@"anyframe", - => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, @@ -3089,6 +2985,7 @@ pub const Type = struct { .noreturn => unreachable, .generic_poison => unreachable, + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -3130,8 +3027,6 @@ pub 
const Type = struct { return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; }, - .isize, - .usize, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -3153,16 +3048,6 @@ pub const Type = struct { .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, - .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, - .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, - .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, - .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, - .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, - .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, - .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, - .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, - // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, .anyerror, @@ -3491,7 +3376,7 @@ pub const Type = struct { .usize, .isize, .@"anyframe", - => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, @@ -3524,6 +3409,7 @@ pub const Type = struct { .type_info => unreachable, .noreturn => unreachable, .generic_poison => unreachable, + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -3666,8 +3552,6 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = result }; }, - .isize, - .usize, .@"anyframe", .anyframe_T, .optional_single_const_pointer, @@ -3694,16 +3578,6 @@ pub const Type = struct { else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, }, - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, .anyerror, @@ -3856,7 +3730,7 @@ pub const Type = struct { .usize, .isize, .@"anyframe", - => return target.cpu.arch.ptrBitWidth(), + => return target.ptrBitWidth(), .c_char => return target.c_type_bit_size(.char), .c_short => return target.c_type_bit_size(.short), @@ -3896,6 +3770,7 @@ pub const Type = struct { .export_options => unreachable, // missing call to resolveTypeFields .extern_options => unreachable, // missing call to resolveTypeFields .type_info => unreachable, // missing call to 
resolveTypeFields + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -4000,8 +3875,6 @@ pub const Type = struct { return payload.len * 8 * elem_size + elem_bit_size; }, - .isize, - .usize, .@"anyframe", .anyframe_T, => return target.ptrBitWidth(), @@ -4040,16 +3913,6 @@ pub const Type = struct { .manyptr_const_u8_sentinel_0, => return target.ptrBitWidth(), - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .error_set, .error_set_single, .anyerror_void_error_union, @@ -4876,12 +4739,6 @@ pub const Type = struct { }; return switch (ty.tag()) { .i8, - .isize, - .c_char, - .c_short, - .c_int, - .c_long, - .c_longlong, .i16, .i32, .i64, @@ -4903,11 +4760,6 @@ pub const Type = struct { else => return false, }; return switch (ty.tag()) { - .usize, - .c_ushort, - .c_uint, - .c_ulong, - .c_ulonglong, .u1, .u8, .u16, @@ -4938,13 +4790,26 @@ pub const Type = struct { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), + .ptr_type => unreachable, + .array_type => unreachable, .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), - .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), - .struct_type => unreachable, + .optional_type => unreachable, + .error_union_type => unreachable, + .simple_type => |t| switch (t) { + .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, + .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => unreachable, + }, + .struct_type => @panic("TODO"), .union_type => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -4965,17 +4830,6 @@ pub const Type = struct { .i64 => return .{ .signedness = .signed, .bits = 64 }, .u128 => return .{ .signedness = .unsigned, .bits = 128 }, .i128 => return .{ .signedness = .signed, .bits = 128 }, - .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, - .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, - .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, - .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, - .c_ushort 
=> return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, - .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, - .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, - .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, - .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, - .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, - .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, @@ -5003,19 +4857,19 @@ pub const Type = struct { }; } - pub fn isNamedInt(self: Type) bool { - return switch (self.tag()) { - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, + pub fn isNamedInt(ty: Type) bool { + return switch (ty.ip_index) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, => true, else => false, @@ -5180,17 +5034,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, => true, else => false, @@ -5284,17 +5127,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .type, .anyerror, @@ -5502,6 +5334,8 @@ pub const Type = struct { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -5524,17 +5358,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -6372,17 +6195,6 @@ pub const Type = struct { i64, u128, i128, - usize, - isize, - c_char, - c_short, - c_ushort, - c_int, - c_uint, - c_long, - c_ulong, - c_longlong, - c_ulonglong, anyopaque, bool, void, @@ -6480,17 +6292,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -6859,9 +6660,13 @@ pub const Type = struct { pub const @"u29" = initTag(.u29); pub const @"u32" = initTag(.u32); pub const @"u64" = initTag(.u64); + pub const @"u128" = initTag(.u128); + pub const @"i8" = initTag(.i8); + pub const @"i16" = initTag(.i16); pub const @"i32" = initTag(.i32); pub const @"i64" = initTag(.i64); + pub const @"i128" = initTag(.i128); pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined }; pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined }; @@ -6870,8 +6675,8 @@ pub const Type = struct { pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined }; pub const @"bool" = initTag(.bool); - pub const @"usize" = initTag(.usize); - pub const @"isize" = initTag(.isize); + pub const @"usize": Type = .{ .ip_index = .usize_type, .legacy = undefined }; + pub const @"isize": Type = .{ .ip_index = .isize_type, .legacy = undefined }; pub const @"comptime_int": Type = .{ 
.ip_index = .comptime_int_type, .legacy = undefined }; pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined }; pub const @"void" = initTag(.void); @@ -6879,8 +6684,18 @@ pub const Type = struct { pub const @"anyerror" = initTag(.anyerror); pub const @"anyopaque" = initTag(.anyopaque); pub const @"null" = initTag(.null); + pub const @"undefined" = initTag(.undefined); pub const @"noreturn" = initTag(.noreturn); + pub const @"c_char": Type = .{ .ip_index = .c_char_type, .legacy = undefined }; + pub const @"c_short": Type = .{ .ip_index = .c_short_type, .legacy = undefined }; + pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type, .legacy = undefined }; + pub const @"c_int": Type = .{ .ip_index = .c_int_type, .legacy = undefined }; + pub const @"c_uint": Type = .{ .ip_index = .c_uint_type, .legacy = undefined }; + pub const @"c_long": Type = .{ .ip_index = .c_long_type, .legacy = undefined }; + pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type, .legacy = undefined }; + pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type, .legacy = undefined }; + pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; pub const err_int = Type.u16; diff --git a/src/value.zig b/src/value.zig index 8912209d5e9f..b0484dfc7685 100644 --- a/src/value.zig +++ b/src/value.zig @@ -951,45 +951,45 @@ pub const Value = struct { } return switch (self.tag()) { .ty => self.castTag(.ty).?.data, - .u1_type => Type.initTag(.u1), - .u8_type => Type.initTag(.u8), - .i8_type => Type.initTag(.i8), - .u16_type => Type.initTag(.u16), - .i16_type => Type.initTag(.i16), - .u29_type => Type.initTag(.u29), - .u32_type => Type.initTag(.u32), - .i32_type => Type.initTag(.i32), - .u64_type => Type.initTag(.u64), - .i64_type => Type.initTag(.i64), - .u128_type => Type.initTag(.u128), - .i128_type => Type.initTag(.i128), - .usize_type => Type.initTag(.usize), - .isize_type => Type.initTag(.isize), - .c_char_type => Type.initTag(.c_char), - .c_short_type => Type.initTag(.c_short), - .c_ushort_type => Type.initTag(.c_ushort), - .c_int_type => Type.initTag(.c_int), - .c_uint_type => Type.initTag(.c_uint), - .c_long_type => Type.initTag(.c_long), - .c_ulong_type => Type.initTag(.c_ulong), - .c_longlong_type => Type.initTag(.c_longlong), - .c_ulonglong_type => Type.initTag(.c_ulonglong), + .u1_type => Type.u1, + .u8_type => Type.u8, + .i8_type => Type.i8, + .u16_type => Type.u16, + .i16_type => Type.i16, + .u29_type => Type.u29, + .u32_type => Type.u32, + .i32_type => Type.i32, + .u64_type => Type.u64, + .i64_type => Type.i64, + .u128_type => Type.u128, + .i128_type => Type.i128, + .usize_type => Type.usize, + .isize_type => Type.isize, + .c_char_type => Type.c_char, + .c_short_type => Type.c_short, + .c_ushort_type => Type.c_ushort, + .c_int_type => Type.c_int, + .c_uint_type => Type.c_uint, + .c_long_type => Type.c_long, + .c_ulong_type => Type.c_ulong, + .c_longlong_type => Type.c_longlong, + .c_ulonglong_type => Type.c_ulonglong, .c_longdouble_type => Type.c_longdouble, .f16_type => Type.f16, .f32_type => Type.f32, .f64_type => Type.f64, .f80_type => Type.f80, .f128_type => Type.f128, - .anyopaque_type => Type.initTag(.anyopaque), - .bool_type => Type.initTag(.bool), - .void_type => Type.initTag(.void), - .type_type => Type.initTag(.type), - .anyerror_type => Type.initTag(.anyerror), - .comptime_int_type => Type.initTag(.comptime_int), + .anyopaque_type 
=> Type.anyopaque, + .bool_type => Type.bool, + .void_type => Type.void, + .type_type => Type.type, + .anyerror_type => Type.anyerror, + .comptime_int_type => Type.comptime_int, .comptime_float_type => Type.comptime_float, - .noreturn_type => Type.initTag(.noreturn), - .null_type => Type.initTag(.null), - .undefined_type => Type.initTag(.undefined), + .noreturn_type => Type.noreturn, + .null_type => Type.null, + .undefined_type => Type.undefined, .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), .anyframe_type => Type.initTag(.@"anyframe"), .const_slice_u8_type => Type.initTag(.const_slice_u8), From 836d8a1f64cb811641e621799429c54f222717eb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 18:11:07 -0700 Subject: [PATCH 014/205] stage2: move most simple types to InternPool --- src/Air.zig | 4 +- src/Module.zig | 51 +-- src/Sema.zig | 503 ++++++++++-------------- src/arch/aarch64/CodeGen.zig | 7 +- src/arch/arm/CodeGen.zig | 7 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 10 +- src/arch/x86_64/CodeGen.zig | 8 +- src/codegen/c.zig | 16 +- src/codegen/llvm.zig | 38 +- src/codegen/spirv.zig | 6 +- src/print_air.zig | 1 - src/type.zig | 739 +++++++---------------------------- src/value.zig | 28 +- 14 files changed, 435 insertions(+), 985 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index be3ae119e49b..1bc9d949e298 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1333,7 +1333,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .ret_load, .unreach, .trap, - => return Type.initTag(.noreturn), + => return Type.noreturn, .breakpoint, .dbg_stmt, @@ -1370,7 +1370,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .wasm_memory_grow => return Type.i32, .wasm_memory_size => return Type.u32, - .bool_to_int => return Type.initTag(.u1), + .bool_to_int => return Type.u1, .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), diff --git a/src/Module.zig b/src/Module.zig index a4ae107bede2..77c20fbcc668 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1005,7 +1005,7 @@ pub const Struct = struct { /// If the layout is packed, this is the backing integer type of the packed struct. /// Whether zig chooses this type or the user specifies it, it is stored here. /// This will be set to the noreturn type until status is `have_layout`. 
- backing_int_ty: Type = Type.initTag(.noreturn), + backing_int_ty: Type = Type.noreturn, status: enum { none, field_types_wip, @@ -1705,31 +1705,34 @@ pub const Fn = struct { is_resolved: bool = false, pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void { - switch (err_set_ty.tag()) { - .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .error_set_single => { - const name = err_set_ty.castTag(.error_set_single).?.data; - try self.errors.put(gpa, name, {}); - }, - .error_set_inferred => { - const ies = err_set_ty.castTag(.error_set_inferred).?.data; - try self.inferred_error_sets.put(gpa, ies, {}); + switch (err_set_ty.ip_index) { + .anyerror_type => { + self.is_anyerror = true; }, - .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { + .none => switch (err_set_ty.tag()) { + .error_set => { + const names = err_set_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + try self.errors.put(gpa, name, {}); + } + }, + .error_set_single => { + const name = err_set_ty.castTag(.error_set_single).?.data; try self.errors.put(gpa, name, {}); - } - }, - .anyerror => { - self.is_anyerror = true; + }, + .error_set_inferred => { + const ies = err_set_ty.castTag(.error_set_inferred).?.data; + try self.inferred_error_sets.put(gpa, ies, {}); + }, + .error_set_merged => { + const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + try self.errors.put(gpa, name, {}); + } + }, + else => unreachable, }, - else => unreachable, + else => @panic("TODO"), } } }; @@ -4566,7 +4569,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { const struct_obj = try new_decl_arena_allocator.create(Module.Struct); const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const ty_ty = comptime Type.initTag(.type); + const ty_ty = comptime Type.type; struct_obj.* = .{ .owner_decl = undefined, // set below .fields = .{}, diff --git a/src/Sema.zig b/src/Sema.zig index d03460385e44..ea8258717b5f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1776,7 +1776,7 @@ fn analyzeAsType( src: LazySrcLoc, air_inst: Air.Inst.Ref, ) !Type { - const wanted_type = Type.initTag(.type); + const wanted_type = Type.type; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known"); const ty = val.toType(); @@ -3132,7 +3132,7 @@ fn zirUnionDecl( errdefer mod.abortAnonDecl(new_decl_index); union_obj.* = .{ .owner_decl = new_decl_index, - .tag_ty = Type.initTag(.null), + .tag_ty = Type.null, .fields = .{}, .zir_index = inst, .layout = small.layout, @@ -6362,7 +6362,7 @@ fn zirCall( if (arg_index >= fn_params_len) break :inst Air.Inst.Ref.var_args_param_type; - if (func_ty_info.param_types[arg_index].tag() == .generic_poison) + if (func_ty_info.param_types[arg_index].isGenericPoison()) break :inst Air.Inst.Ref.generic_poison_type; break :inst try sema.addType(func_ty_info.param_types[arg_index]); @@ -8175,7 +8175,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); // Anything merged with anyerror is anyerror. 
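
The "anything merged with anyerror is anyerror" rule that the next hunk re-expresses in terms of ip_index can be modeled as a set union with an absorbing element. A standalone sketch under that reading (Set and mergeLen are illustrative stand-ins, not the compiler's interned representation):

    const std = @import("std");

    // Set-union model of the merge below: anyerror absorbs everything;
    // otherwise the result is the union of the two name sets.
    const Set = union(enum) { anyerror, names: []const []const u8 };

    fn mergeLen(a: Set, b: Set) ?usize {
        if (a == .anyerror or b == .anyerror) return null; // null models anyerror
        var n = a.names.len;
        outer: for (b.names) |name| {
            for (a.names) |existing| {
                if (std.mem.eql(u8, existing, name)) continue :outer;
            }
            n += 1; // name only in b contributes once to the union
        }
        return n;
    }

    test "anyerror absorbs any merge" {
        try std.testing.expectEqual(@as(?usize, null), mergeLen(.anyerror, .{ .names = &.{"E"} }));
        try std.testing.expectEqual(@as(?usize, 2), mergeLen(.{ .names = &.{"A"} }, .{ .names = &.{ "A", "B" } }));
    }
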
- if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { + if (lhs_ty.ip_index == .anyerror_type or rhs_ty.ip_index == .anyerror_type) { return Air.Inst.Ref.anyerror_type; } @@ -8206,7 +8206,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); return sema.addConstant( - Type.initTag(.enum_literal), + .{ .ip_index = .enum_literal_type, .legacy = undefined }, try Value.Tag.enum_literal.create(sema.arena, duped_name), ); } @@ -8503,6 +8503,7 @@ fn analyzeErrUnionPayload( operand_src: LazySrcLoc, safety_check: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const payload_ty = err_union_ty.errorUnionPayload(); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { if (val.getError()) |name| { @@ -8516,7 +8517,7 @@ fn analyzeErrUnionPayload( // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty()) + !err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err, .is_non_err); } @@ -8602,7 +8603,7 @@ fn analyzeErrUnionPayloadPtr( // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty()) + !err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr); } @@ -8701,7 +8702,7 @@ fn zirFunc( break :blk ret_ty; } else |err| switch (err) { error.GenericPoison => { - break :blk Type.initTag(.generic_poison); + break :blk Type.generic_poison; }, else => |e| return e, } @@ -8778,7 +8779,7 @@ fn resolveGenericBody( }; switch (err) { error.GenericPoison => { - if (dest_ty.tag() == .type) { + if (dest_ty.ip_index == .type_type) { return Value.initTag(.generic_poison_type); } else { return Value.initTag(.generic_poison); @@ -9319,7 +9320,7 @@ fn zirParam( // We result the param instruction with a poison value and // insert an anytype parameter. try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9340,7 +9341,7 @@ fn zirParam( // We result the param instruction with a poison value and // insert an anytype parameter. try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9438,7 +9439,7 @@ fn zirParamAnytype( // We are evaluating a generic function without any comptime args provided. 
try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -18877,7 +18878,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in }, .ErrorSet => { const payload_val = union_val.val.optionalValue(mod) orelse - return sema.addType(Type.initTag(.anyerror)); + return sema.addType(Type.anyerror); const slice_val = payload_val.castTag(.slice).?.data; const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); @@ -19150,7 +19151,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in errdefer mod.abortAnonDecl(new_decl_index); union_obj.* = .{ .owner_decl = new_decl_index, - .tag_ty = Type.initTag(.null), + .tag_ty = Type.null, .fields = .{}, .zir_index = inst, .layout = layout, @@ -22697,7 +22698,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += 1; const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) { error.GenericPoison => { - break :blk Type.initTag(.generic_poison); + break :blk Type.generic_poison; }, else => |e| return e, }; @@ -23022,7 +23023,7 @@ fn zirBuiltinExtern( new_decl.src_line = sema.owner_decl.src_line; // We only access this decl through the decl_ref with the correct type created // below, so this type doesn't matter - new_decl.ty = Type.Tag.init(.anyopaque); + new_decl.ty = Type.anyopaque; new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var); new_decl.@"align" = 0; new_decl.@"linksection" = null; @@ -24380,9 +24381,8 @@ fn fieldCallBind( decl_type.fnParamLen() >= 1) { const first_param_type = decl_type.fnParamType(0); - const first_param_tag = first_param_type.tag(); // zig fmt: off - if (first_param_tag == .generic_poison or ( + if (first_param_type.isGenericPoison() or ( first_param_type.zigTypeTag(mod) == .Pointer and (first_param_type.ptrSize() == .One or first_param_type.ptrSize() == .C) and @@ -25535,10 +25535,7 @@ fn coerceExtra( inst_src: LazySrcLoc, opts: CoerceOpts, ) CoersionError!Air.Inst.Ref { - switch (dest_ty_unresolved.tag()) { - .generic_poison => return inst, - else => {}, - } + if (dest_ty_unresolved.isGenericPoison()) return inst; const dest_ty_src = inst_src; // TODO better source location const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst)); @@ -25577,7 +25574,8 @@ fn coerceExtra( // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer - if (dest_ty.isPtrLikeOptional(mod) and dest_ty.elemType2(mod).tag() == .anyopaque and + if (dest_ty.isPtrLikeOptional(mod) and + dest_ty.elemType2(mod).ip_index == .anyopaque_type and inst_ty.isPtrAtRuntime(mod)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; @@ -25715,7 +25713,7 @@ fn coerceExtra( // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer - if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { + if (dest_info.pointee_type.ip_index == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const elem_ty = inst_ty.elemType2(mod); if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { @@ -26759,6 
+26757,8 @@ fn coerceInMemoryAllowedErrorSets( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { + const mod = sema.mod; + // Coercion to `anyerror`. Note that this check can return false negatives // in case the error sets did not get resolved. if (dest_ty.isAnyError()) { @@ -26769,36 +26769,41 @@ fn coerceInMemoryAllowedErrorSets( const dst_ies = dst_payload.data; // We will make an effort to return `ok` without resolving either error set, to // avoid unnecessary "unable to resolve error set" dependency loop errors. - switch (src_ty.tag()) { - .error_set_inferred => { - // If both are inferred error sets of functions, and - // the dest includes the source function, the coercion is OK. - // This check is important because it works without forcing a full resolution - // of inferred error sets. - const src_ies = src_ty.castTag(.error_set_inferred).?.data; - - if (dst_ies.inferred_error_sets.contains(src_ies)) { - return .ok; - } - }, - .error_set_single => { - const name = src_ty.castTag(.error_set_single).?.data; - if (dst_ies.errors.contains(name)) return .ok; - }, - .error_set_merged => { - const names = src_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - if (!dst_ies.errors.contains(name)) break; - } else return .ok; + switch (src_ty.ip_index) { + .none => switch (src_ty.tag()) { + .error_set_inferred => { + // If both are inferred error sets of functions, and + // the dest includes the source function, the coercion is OK. + // This check is important because it works without forcing a full resolution + // of inferred error sets. + const src_ies = src_ty.castTag(.error_set_inferred).?.data; + + if (dst_ies.inferred_error_sets.contains(src_ies)) { + return .ok; + } + }, + .error_set_single => { + const name = src_ty.castTag(.error_set_single).?.data; + if (dst_ies.errors.contains(name)) return .ok; + }, + .error_set_merged => { + const names = src_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + if (!dst_ies.errors.contains(name)) break; + } else return .ok; + }, + .error_set => { + const names = src_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + if (!dst_ies.errors.contains(name)) break; + } else return .ok; + }, + else => unreachable, }, - .error_set => { - const names = src_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - if (!dst_ies.errors.contains(name)) break; - } else return .ok; + .anyerror_type => {}, + else => switch (mod.intern_pool.indexToKey(src_ty.ip_index)) { + else => @panic("TODO"), }, - .anyerror => {}, - else => unreachable, } if (dst_ies.func == sema.owner_func) { @@ -26818,79 +26823,87 @@ fn coerceInMemoryAllowedErrorSets( var missing_error_buf = std.ArrayList([]const u8).init(sema.gpa); defer missing_error_buf.deinit(); - switch (src_ty.tag()) { - .error_set_inferred => { - const src_data = src_ty.castTag(.error_set_inferred).?.data; + switch (src_ty.ip_index) { + .none => switch (src_ty.tag()) { + .error_set_inferred => { + const src_data = src_ty.castTag(.error_set_inferred).?.data; - try sema.resolveInferredErrorSet(block, src_src, src_data); - // src anyerror status might have changed after the resolution. - if (src_ty.isAnyError()) { - // dest_ty.isAnyError() == true is already checked for at this point. - return .from_anyerror; - } + try sema.resolveInferredErrorSet(block, src_src, src_data); + // src anyerror status might have changed after the resolution. 
+ if (src_ty.isAnyError()) { + // dest_ty.isAnyError() == true is already checked for at this point. + return .from_anyerror; + } - for (src_data.errors.keys()) |key| { - if (!dest_ty.errorSetHasField(key)) { - try missing_error_buf.append(key); + for (src_data.errors.keys()) |key| { + if (!dest_ty.errorSetHasField(key)) { + try missing_error_buf.append(key); + } } - } - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } + if (missing_error_buf.items.len != 0) { + return InMemoryCoercionResult{ + .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), + }; + } - return .ok; - }, - .error_set_single => { - const name = src_ty.castTag(.error_set_single).?.data; - if (dest_ty.errorSetHasField(name)) { return .ok; - } - const list = try sema.arena.alloc([]const u8, 1); - list[0] = name; - return InMemoryCoercionResult{ .missing_error = list }; - }, - .error_set_merged => { - const names = src_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - if (!dest_ty.errorSetHasField(name)) { - try missing_error_buf.append(name); + }, + .error_set_single => { + const name = src_ty.castTag(.error_set_single).?.data; + if (dest_ty.errorSetHasField(name)) { + return .ok; + } + const list = try sema.arena.alloc([]const u8, 1); + list[0] = name; + return InMemoryCoercionResult{ .missing_error = list }; + }, + .error_set_merged => { + const names = src_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + if (!dest_ty.errorSetHasField(name)) { + try missing_error_buf.append(name); + } } - } - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } + if (missing_error_buf.items.len != 0) { + return InMemoryCoercionResult{ + .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), + }; + } - return .ok; - }, - .error_set => { - const names = src_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - if (!dest_ty.errorSetHasField(name)) { - try missing_error_buf.append(name); + return .ok; + }, + .error_set => { + const names = src_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + if (!dest_ty.errorSetHasField(name)) { + try missing_error_buf.append(name); + } } - } - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } + if (missing_error_buf.items.len != 0) { + return InMemoryCoercionResult{ + .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), + }; + } - return .ok; - }, - .anyerror => switch (dest_ty.tag()) { - .error_set_inferred => unreachable, // Caught by dest_ty.isAnyError() above. - .error_set_single, .error_set_merged, .error_set => return .from_anyerror, - .anyerror => unreachable, // Filtered out above. + return .ok; + }, else => unreachable, }, - else => unreachable, + + .anyerror_type => switch (dest_ty.ip_index) { + .none => switch (dest_ty.tag()) { + .error_set_inferred => unreachable, // Caught by dest_ty.isAnyError() above. + .error_set_single, .error_set_merged, .error_set => return .from_anyerror, + else => unreachable, + }, + .anyerror_type => unreachable, // Filtered out above. 
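
Behind the reindentation in this function, the check itself is a name-subset test: a source error set coerces in memory only when the destination covers every name the source can produce, and the stragglers are collected for the .missing_error diagnostic. The same logic reduced to a standalone function (hypothetical names, plain slices instead of the compiler's interned maps):

    const std = @import("std");

    // dest covers src when every name src can produce is present in dest;
    // anything missing is what gets reported as .missing_error above.
    fn coversAll(dest: []const []const u8, src: []const []const u8) bool {
        outer: for (src) |name| {
            for (dest) |have| {
                if (std.mem.eql(u8, have, name)) continue :outer;
            }
            return false;
        }
        return true;
    }

    test "a source error set must be a name subset of the destination" {
        const dest = [_][]const u8{ "OutOfMemory", "FileNotFound" };
        try std.testing.expect(coversAll(&dest, &.{"OutOfMemory"}));
        try std.testing.expect(!coversAll(&dest, &.{ "OutOfMemory", "AccessDenied" }));
    }
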
+ else => @panic("TODO"), + }, + + else => @panic("TODO"), } unreachable; @@ -29355,42 +29368,49 @@ fn analyzeIsNonErrComptimeOnly( // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. const set_ty = operand_ty.errorUnionSet(); - switch (set_ty.tag()) { - .anyerror => {}, - .error_set_inferred => blk: { - // If the error set is empty, we must return a comptime true or false. - // However we want to avoid unnecessarily resolving an inferred error set - // in case it is already non-empty. - const ies = set_ty.castTag(.error_set_inferred).?.data; - if (ies.is_anyerror) break :blk; - if (ies.errors.count() != 0) break :blk; - if (maybe_operand_val == null) { - // Try to avoid resolving inferred error set if possible. - if (ies.errors.count() != 0) break :blk; + switch (set_ty.ip_index) { + .none => switch (set_ty.tag()) { + .error_set_inferred => blk: { + // If the error set is empty, we must return a comptime true or false. + // However we want to avoid unnecessarily resolving an inferred error set + // in case it is already non-empty. + const ies = set_ty.castTag(.error_set_inferred).?.data; if (ies.is_anyerror) break :blk; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); - if (other_ies.is_anyerror) { - ies.is_anyerror = true; - ies.is_resolved = true; - break :blk; - } + if (ies.errors.count() != 0) break :blk; + if (maybe_operand_val == null) { + // Try to avoid resolving inferred error set if possible. + if (ies.errors.count() != 0) break :blk; + if (ies.is_anyerror) break :blk; + for (ies.inferred_error_sets.keys()) |other_ies| { + if (ies == other_ies) continue; + try sema.resolveInferredErrorSet(block, src, other_ies); + if (other_ies.is_anyerror) { + ies.is_anyerror = true; + ies.is_resolved = true; + break :blk; + } - if (other_ies.errors.count() != 0) break :blk; - } - if (ies.func == sema.owner_func) { - // We're checking the inferred errorset of the current function and none of - // its child inferred error sets contained any errors meaning that any value - // so far with this type can't contain errors either. - return Air.Inst.Ref.bool_true; + if (other_ies.errors.count() != 0) break :blk; + } + if (ies.func == sema.owner_func) { + // We're checking the inferred errorset of the current function and none of + // its child inferred error sets contained any errors meaning that any value + // so far with this type can't contain errors either. 
+ return Air.Inst.Ref.bool_true; + } + try sema.resolveInferredErrorSet(block, src, ies); + if (ies.is_anyerror) break :blk; + if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; } - try sema.resolveInferredErrorSet(block, src, ies); - if (ies.is_anyerror) break :blk; - if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; - } + }, + else => if (set_ty.errorSetNames().len == 0) return Air.Inst.Ref.bool_true, + }, + + .anyerror_type => {}, + + else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) { + else => @panic("TODO"), }, - else => if (set_ty.errorSetNames().len == 0) return Air.Inst.Ref.bool_true, } if (maybe_operand_val) |err_union| { @@ -30308,43 +30328,48 @@ fn wrapErrorUnionSet( const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(); if (try sema.resolveMaybeUndefVal(inst)) |val| { - switch (dest_err_set_ty.tag()) { - .anyerror => {}, - .error_set_single => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const n = dest_err_set_ty.castTag(.error_set_single).?.data; - if (mem.eql(u8, expected_name, n)) break :ok; - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set).?.data; - if (!error_set.names.contains(expected_name)) { + switch (dest_err_set_ty.ip_index) { + .anyerror_type => {}, + + .none => switch (dest_err_set_ty.tag()) { + .error_set_single => ok: { + const expected_name = val.castTag(.@"error").?.data.name; + const n = dest_err_set_ty.castTag(.error_set_single).?.data; + if (mem.eql(u8, expected_name, n)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } - }, - .error_set_inferred => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const ies = dest_err_set_ty.castTag(.error_set_inferred).?.data; - - // We carefully do this in an order that avoids unnecessarily - // resolving the destination error set type. - if (ies.is_anyerror) break :ok; - if (ies.errors.contains(expected_name)) break :ok; - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { - break :ok; - } + }, + .error_set => { + const expected_name = val.castTag(.@"error").?.data.name; + const error_set = dest_err_set_ty.castTag(.error_set).?.data; + if (!error_set.names.contains(expected_name)) { + return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); + } + }, + .error_set_inferred => ok: { + const expected_name = val.castTag(.@"error").?.data.name; + const ies = dest_err_set_ty.castTag(.error_set_inferred).?.data; + + // We carefully do this in an order that avoids unnecessarily + // resolving the destination error set type. 
+ if (ies.is_anyerror) break :ok; + if (ies.errors.contains(expected_name)) break :ok; + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { + break :ok; + } - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set_merged => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; - if (!error_set.contains(expected_name)) { return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } + }, + .error_set_merged => { + const expected_name = val.castTag(.@"error").?.data.name; + const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; + if (!error_set.contains(expected_name)) { + return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); + } + }, + else => unreachable, }, - else => unreachable, + + else => @panic("TODO"), } return sema.addConstant(dest_ty, val); } @@ -30380,7 +30405,7 @@ fn resolvePeerTypes( ) !Type { const mod = sema.mod; switch (instructions.len) { - 0 => return Type.initTag(.noreturn), + 0 => return Type.noreturn, 1 => return sema.typeOf(instructions[0]), else => {}, } @@ -31445,24 +31470,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .null, - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, + .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -31476,17 +31484,12 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_inferred, .error_set_merged, .@"opaque", - .generic_poison, .array_u8, .array_u8_sentinel_0, .enum_simple, => false, .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .enum_literal, - .type_info, .function, => true, @@ -31709,17 +31712,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { try sema.resolveTypeFieldsUnion(ty, union_obj); return ty; }, - .type_info => return sema.getBuiltinType("Type"), - .extern_options => return sema.getBuiltinType("ExternOptions"), - .export_options => return sema.getBuiltinType("ExportOptions"), - .atomic_order => return sema.getBuiltinType("AtomicOrder"), - .atomic_rmw_op => return sema.getBuiltinType("AtomicRmwOp"), - .calling_convention => return sema.getBuiltinType("CallingConvention"), - .address_space => return sema.getBuiltinType("AddressSpace"), - .float_mode => return sema.getBuiltinType("FloatMode"), - .reduce_op => return sema.getBuiltinType("ReduceOp"), - .modifier => return sema.getBuiltinType("CallModifier"), - .prefetch_options => return sema.getBuiltinType("PrefetchOptions"), else => return ty, }, @@ -31772,6 +31764,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .const_slice_u8_type, .anyerror_void_error_union_type, .generic_poison_type, + .var_args_param_type, .empty_struct_type, => return ty, @@ -31789,7 +31782,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .bool_false => unreachable, .empty_struct => unreachable, .generic_poison => unreachable, - .var_args_param_type => unreachable, .type_info_type => return sema.getBuiltinType("Type"), .extern_options_type => return sema.getBuiltinType("ExternOptions"), @@ -32118,7 +32110,7 @@ fn semaStructFields(mod: *Module, 
struct_obj: *Module.Struct) CompileError!void return sema.failWithOwnedErrorMsg(msg); } gop.value_ptr.* = .{ - .ty = Type.initTag(.noreturn), + .ty = Type.noreturn, .abi_align = 0, .default_val = Value.initTag(.unreachable_value), .is_comptime = is_comptime, @@ -32552,7 +32544,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const field_ty: Type = if (!has_type) Type.void else if (field_type_ref == .none) - Type.initTag(.noreturn) + Type.noreturn else sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) { error.NeededSourceLocation => { @@ -32956,7 +32948,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }; switch (ty.tag()) { - .comptime_int, .u1, .u8, .i8, @@ -32969,9 +32960,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .i64, .u128, .i128, - .bool, - .type, - .anyerror, + .error_set_single, .error_set, .error_set_merged, @@ -32984,28 +32973,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .const_slice_u8_sentinel_0, .const_slice, .mut_slice, - .anyopaque, .optional_single_mut_pointer, .optional_single_const_pointer, - .enum_literal, .anyerror_void_error_union, .error_set_inferred, .@"opaque", .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", .anyframe_T, .many_const_pointer, .many_mut_pointer, @@ -33138,10 +33113,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .void => return Value.void, - .noreturn => return Value.initTag(.unreachable_value), - .null => return Value.null, - .undefined => return Value.initTag(.undef), .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) @@ -33154,7 +33125,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => return error.GenericPoison, } } @@ -33194,34 +33164,12 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .i64 => return .i64_type, .u128 => return .u128_type, .i128 => return .i128_type, - .anyopaque => return .anyopaque_type, - .bool => return .bool_type, - .void => return .void_type, - .type => return .type_type, - .anyerror => return .anyerror_type, - .comptime_int => return .comptime_int_type, - .noreturn => return .noreturn_type, - .@"anyframe" => return .anyframe_type, - .null => return .null_type, - .undefined => return .undefined_type, - .enum_literal => return .enum_literal_type, - .atomic_order => return .atomic_order_type, - .atomic_rmw_op => return .atomic_rmw_op_type, - .calling_convention => return .calling_convention_type, - .address_space => return .address_space_type, - .float_mode => return .float_mode_type, - .reduce_op => return .reduce_op_type, - .modifier => return .call_modifier_type, - .prefetch_options => return .prefetch_options_type, - .export_options => return .export_options_type, - .extern_options => return .extern_options_type, - .type_info => return .type_info_type, + .manyptr_u8 => return .manyptr_u8_type, .manyptr_const_u8 => return .manyptr_const_u8_type, .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, .const_slice_u8 => return .const_slice_u8_type, 
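[Note: the arms deleted from `addType` above are exactly the types that now live behind an `ip_index`. Since the `Air.Inst.Ref` entries for interned types are defined as `@enumToInt(InternPool.Index....)` (see the Air.zig hunks in this series), the mapping for them needs no per-tag switch arm at all. A minimal standalone sketch of that invariant, with toy enums and an arbitrary index value, not the compiler's own definitions:]

    const std = @import("std");

    // Toy mirrors of InternPool.Index and Air.Inst.Ref: the Ref entry is
    // defined in terms of the pool index, so the two enums agree numerically.
    const PoolIndex = enum(u32) {
        bool_type = 7,
        none = std.math.maxInt(u32),
    };
    const Ref = enum(u32) {
        bool_type = @enumToInt(PoolIndex.bool_type),
        _,
    };

    // Hypothetical helper: converting an interned type to a Ref is a cast,
    // not a switch over every builtin type tag.
    fn toRef(i: PoolIndex) Ref {
        return @intToEnum(Ref, @enumToInt(i));
    }

    test "pool index and ref share numbering" {
        try std.testing.expectEqual(Ref.bool_type, toRef(.bool_type));
    }
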
.anyerror_void_error_union => return .anyerror_void_error_union_type, - .generic_poison => return .generic_poison_type, else => {}, } try sema.air_instructions.append(sema.gpa, .{ @@ -33658,22 +33606,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, + .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -33687,19 +33620,12 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_inferred, .error_set_merged, .@"opaque", - .generic_poison, .array_u8, .array_u8_sentinel_0, .enum_simple, => false, .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .enum_literal, - .null, - .undefined, - .type_info, .function, => true, @@ -34476,17 +34402,6 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { const tag_ty = try mod.intType(.unsigned, bits); return sema.intInRange(tag_ty, int, fields_len); }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - => unreachable, else => unreachable, } diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 467186619709..7098cf3f321b 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3071,7 +3071,7 @@ fn errUnionErr( const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -3151,7 +3151,7 @@ fn errUnionPayload( const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4905,9 +4905,10 @@ fn isErr( error_union_bind: ReadArg.Bind, error_union_ty: Type, ) !MCValue { + const mod = self.bin_file.options.module.?; const error_type = error_union_ty.errorUnionSet(); - if (error_type.errorSetIsEmpty()) { + if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index ca4a3826aa8e..bf94cf55a0e6 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2047,7 +2047,7 @@ fn errUnionErr( const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -2124,7 +2124,7 @@ fn errUnionPayload( const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4882,9 +4882,10 @@ fn isErr( error_union_bind: ReadArg.Bind, 
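[Note: the backend hunks in this stretch all repeat one mechanical change: `errorSetIsEmpty`, like most Type and Value queries after this patch, takes the Module so it can reach the InternPool. A standalone toy of the before/after shape, under the assumption that interned data is only reachable through the pool; all names here are illustrative:]

    const std = @import("std");

    // Toy stand-in for the pool: maps an index to the number of names in
    // that interned error set.
    const Pool = struct {
        counts: []const u32,

        fn errorCount(p: Pool, idx: u32) u32 {
            return p.counts[idx];
        }
    };

    const Ty = union(enum) {
        legacy_count: u32,
        ip_index: u32,

        // Before: errorSetIsEmpty(ty). After: the context (here, the pool;
        // in the compiler, *Module) is threaded in by every caller.
        fn errorSetIsEmpty(ty: Ty, pool: Pool) bool {
            return switch (ty) {
                .legacy_count => |n| n == 0,
                .ip_index => |idx| pool.errorCount(idx) == 0,
            };
        }
    };

    test "the query now needs pool access" {
        const pool = Pool{ .counts = &.{ 0, 3 } };
        try std.testing.expect(Ty.errorSetIsEmpty(.{ .ip_index = 0 }, pool));
        try std.testing.expect(!Ty.errorSetIsEmpty(.{ .ip_index = 1 }, pool));
    }
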
error_union_ty: Type, ) !MCValue { + const mod = self.bin_file.options.module.?; const error_type = error_union_ty.errorUnionSet(); - if (error_type.errorSetIsEmpty()) { + if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 343cc2f90edd..a519b7323502 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3530,7 +3530,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return error_union_mcv; } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index bbba43d26578..2c1e8aa36db9 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1036,8 +1036,8 @@ fn genValtype(ty: Type, mod: *Module) u8 { /// Differently from `genValtype` this also allows `void` to create a block /// with no return type fn genBlockType(ty: Type, mod: *Module) u8 { - return switch (ty.tag()) { - .void, .noreturn => wasm.block_empty, + return switch (ty.ip_index) { + .void_type, .noreturn_type => wasm.block_empty, else => genValtype(ty, mod), }; } @@ -3948,7 +3948,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro const pl_ty = err_union_ty.errorUnionPayload(); const result = result: { - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { switch (opcode) { .i32_ne => break :result WValue{ .imm32 = 0 }, .i32_eq => break :result WValue{ .imm32 = 1 }, @@ -4013,7 +4013,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) const payload_ty = err_ty.errorUnionPayload(); const result = result: { - if (err_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_ty.errorUnionSet().errorSetIsEmpty(mod)) { break :result WValue{ .imm32 = 0 }; } @@ -6214,7 +6214,7 @@ fn lowerTry( const pl_ty = err_union_ty.errorUnionPayload(); const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { // Block we can jump out of when error is not set try func.startBlock(.block, wasm.block_empty); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 865ebe02f75e..3e0ca4831b30 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3624,7 +3624,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { break :result MCValue{ .immediate = 0 }; } @@ -5811,7 +5811,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8)); - const int_info = if (src_ty.tag() == .bool) + const int_info = if (src_ty.ip_index == .bool_type) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else src_ty.intInfo(mod); @@ -8716,7 +8716,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } - 
assert(some_info.ty.tag() == .bool); + assert(some_info.ty.ip_index == .bool_type); const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod)); try self.asmRegisterImmediate( .{ ._, .bt }, @@ -8808,7 +8808,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! const mod = self.bin_file.options.module.?; const err_type = ty.errorUnionSet(); - if (err_type.errorSetIsEmpty()) { + if (err_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f4daa56a6d16..2e5e45d54cbb 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1508,7 +1508,7 @@ pub const DeclGen = struct { } if (fn_decl.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); - if (fn_info.return_type.tag() == .noreturn) try w.writeAll("zig_noreturn "); + if (fn_info.return_type.ip_index == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( dg.decl_index, @@ -3783,7 +3783,7 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); - if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits); + if (scalar_ty.ip_index != .bool_type) return try airUnBuiltinCall(f, inst, "not", .bits); const op = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -4292,7 +4292,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) + const result = if (inst_ty.ip_index != .void_type and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else .none; @@ -4354,7 +4354,7 @@ fn lowerTry( const payload_ty = err_union_ty.errorUnionPayload(); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { try writer.writeAll("if ("); if (!payload_has_bits) { if (is_ptr) @@ -5549,7 +5549,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { if (!payload_ty.hasRuntimeBits(mod)) { try f.writeCValue(writer, operand, .Other); } else { - if (!error_ty.errorSetIsEmpty()) + if (!error_ty.errorSetIsEmpty(mod)) if (operand_is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) else @@ -5768,7 +5768,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (!error_ty.errorSetIsEmpty()) + if (!error_ty.errorSetIsEmpty(mod)) if (payload_ty.hasRuntimeBits(mod)) if (is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) @@ -6032,7 +6032,7 @@ fn airCmpBuiltinCall( try writer.writeByte(')'); if (!ref_ret) try writer.print(" {s} {}", .{ compareOperatorC(operator), - try f.fmtIntLiteral(Type.initTag(.i32), Value.zero), + try f.fmtIntLiteral(Type.i32, Value.zero), }); try writer.writeAll(";\n"); try v.end(f, inst, writer); @@ -7749,7 +7749,7 @@ const LowerFnRetTyBuffer = struct { payload: Type.Payload.AnonStruct, }; fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type { - if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.initTag(.noreturn); + if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn; if 
(lowersToArray(ret_ty, mod)) { buffer.names = [1][]const u8{"array"}; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ce78b06f2e72..232cd9d42f38 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1730,7 +1730,7 @@ pub const Object = struct { return ptr_di_ty; }, .Opaque => { - if (ty.tag() == .anyopaque) { + if (ty.ip_index == .anyopaque_type) { const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; @@ -2847,25 +2847,23 @@ pub const DeclGen = struct { const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target); return dg.context.pointerType(llvm_addrspace); }, - .Opaque => switch (t.tag()) { - .@"opaque" => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); - if (gop.found_existing) return gop.value_ptr.*; + .Opaque => { + if (t.ip_index == .anyopaque_type) return dg.context.intType(8); - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + if (gop.found_existing) return gop.value_ptr.*; - const opaque_obj = t.castTag(.@"opaque").?.data; - const name = try opaque_obj.getFullyQualifiedName(dg.module); - defer gpa.free(name); + // The Type memory is ephemeral; since we want to store a longer-lived + // reference, we need to copy it here. + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const llvm_struct_ty = dg.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - return llvm_struct_ty; - }, - .anyopaque => return dg.context.intType(8), - else => unreachable, + const opaque_obj = t.castTag(.@"opaque").?.data; + const name = try opaque_obj.getFullyQualifiedName(dg.module); + defer gpa.free(name); + + const llvm_struct_ty = dg.context.structCreateNamed(name); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + return llvm_struct_ty; }, .Array => { const elem_ty = t.childType(); @@ -5531,7 +5529,7 @@ pub const FuncGen = struct { const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const is_err = err: { const err_set_ty = try fg.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); @@ -6715,7 +6713,7 @@ pub const FuncGen = struct { const err_set_ty = try self.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const llvm_i1 = self.context.intType(1); switch (op) { .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 @@ -6864,7 +6862,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { return operand; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 90c2d93458ce..3a5f5d6f6aea 100644 --- a/src/codegen/spirv.zig +++ 
b/src/codegen/spirv.zig @@ -2849,6 +2849,7 @@ pub const DeclGen = struct { } fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const err_union_id = try self.resolve(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); @@ -2862,7 +2863,7 @@ pub const DeclGen = struct { const eu_layout = self.errorUnionLayout(payload_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const err_id = if (eu_layout.payload_has_bits) try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex()) else @@ -2910,12 +2911,13 @@ pub const DeclGen = struct { fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { // No error possible, so just return undefined. return try self.spv.constUndef(err_ty_ref); } diff --git a/src/print_air.zig b/src/print_air.zig index 39a244e11ffc..f4a1aeae3229 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -370,7 +370,6 @@ const Writer = struct { switch (t) { .inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"), .inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"), - .generic_poison => try s.writeAll("(generic_poison)"), else => try ty.print(s, w.module), } } diff --git a/src/type.zig b/src/type.zig index 4e8d0b9e2076..7db7ad316bfe 100644 --- a/src/type.zig +++ b/src/type.zig @@ -85,9 +85,9 @@ pub const Type = struct { .address_space, .float_mode, .reduce_op, + .call_modifier, => return .Enum, - .call_modifier, .prefetch_options, .export_options, .extern_options, @@ -95,7 +95,7 @@ pub const Type = struct { .type_info => return .Union, - .generic_poison => unreachable, + .generic_poison => return error.GenericPoison, .var_args_param => unreachable, }, @@ -107,8 +107,6 @@ pub const Type = struct { } } switch (ty.tag()) { - .generic_poison => return error.GenericPoison, - .u1, .u8, .i8, @@ -125,19 +123,11 @@ pub const Type = struct { .error_set, .error_set_single, - .anyerror, .error_set_inferred, .error_set_merged, => return .ErrorSet, - .anyopaque, .@"opaque" => return .Opaque, - .bool => return .Bool, - .void => return .Void, - .type => return .Type, - .comptime_int => return .ComptimeInt, - .noreturn => return .NoReturn, - .null => return .Null, - .undefined => return .Undefined, + .@"opaque" => return .Opaque, .function => return .Fn, @@ -172,18 +162,14 @@ pub const Type = struct { .optional_single_const_pointer, .optional_single_mut_pointer, => return .Optional, - .enum_literal => return .EnumLiteral, .anyerror_void_error_union, .error_union => return .ErrorUnion, - .anyframe_T, .@"anyframe" => return .AnyFrame, + .anyframe_T => return .AnyFrame, .empty_struct, .empty_struct_literal, .@"struct", - .prefetch_options, - .export_options, - .extern_options, .tuple, .anon_struct, => return .Struct, @@ -192,19 +178,11 @@ pub const Type = struct { .enum_nonexhaustive, .enum_simple, .enum_numbered, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, => return .Enum, .@"union", 
.union_safety_tagged, .union_tagged, - .type_info, => return .Union, } } @@ -393,7 +371,7 @@ pub const Type = struct { pub fn ptrInfo(self: Type) Payload.Pointer { switch (self.tag()) { .single_const_pointer_to_comptime_int => return .{ .data = .{ - .pointee_type = Type.initTag(.comptime_int), + .pointee_type = Type.comptime_int, .sentinel = null, .@"align" = 0, .@"addrspace" = .generic, @@ -405,7 +383,7 @@ pub const Type = struct { .size = .One, } }, .const_slice_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = null, .@"align" = 0, .@"addrspace" = .generic, @@ -417,7 +395,7 @@ pub const Type = struct { .size = .Slice, } }, .const_slice_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = Value.zero, .@"align" = 0, .@"addrspace" = .generic, @@ -465,7 +443,7 @@ pub const Type = struct { .size = .Many, } }, .manyptr_const_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = null, .@"align" = 0, .@"addrspace" = .generic, @@ -477,7 +455,7 @@ pub const Type = struct { .size = .Many, } }, .manyptr_const_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = Value.zero, .@"align" = 0, .@"addrspace" = .generic, @@ -501,7 +479,7 @@ pub const Type = struct { .size = .Many, } }, .manyptr_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = null, .@"align" = 0, .@"addrspace" = .generic, @@ -608,23 +586,6 @@ pub const Type = struct { if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { - .generic_poison => unreachable, - - .bool, - .void, - .type, - .comptime_int, - .noreturn, - .null, - .undefined, - .anyopaque, - .@"anyframe", - .enum_literal, - => |a_tag| { - assert(a_tag != b.tag()); // because of the comparison at the top of the function. - return false; - }, - .u1, .u8, .i8, @@ -653,10 +614,6 @@ pub const Type = struct { return a_ies == b_ies; }, - .anyerror => { - return b.tag() == .anyerror; - }, - .error_set, .error_set_single, .error_set_merged, @@ -927,13 +884,6 @@ pub const Type = struct { return true; }, - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .@"struct" but b was one of these tags. - .prefetch_options, - .export_options, - .extern_options, - => unreachable, // needed to resolve the type before now - .enum_full, .enum_nonexhaustive => { const a_enum_obj = a.cast(Payload.EnumFull).?.data; const b_enum_obj = (b.cast(Payload.EnumFull) orelse return false).data; @@ -949,26 +899,12 @@ pub const Type = struct { const b_enum_obj = (b.cast(Payload.EnumNumbered) orelse return false).data; return a_enum_obj == b_enum_obj; }, - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .enum_simple but b was one of these tags. - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - => unreachable, // needed to resolve the type before now .@"union", .union_safety_tagged, .union_tagged => { const a_union_obj = a.cast(Payload.Union).?.data; const b_union_obj = (b.cast(Payload.Union) orelse return false).data; return a_union_obj == b_union_obj; }, - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .union_tagged but b was one of these tags. 
- .type_info => unreachable, // needed to resolve the type before now - } } @@ -987,31 +923,6 @@ pub const Type = struct { return; } switch (ty.tag()) { - .generic_poison => unreachable, - - .bool => std.hash.autoHash(hasher, std.builtin.TypeId.Bool), - .void => std.hash.autoHash(hasher, std.builtin.TypeId.Void), - .type => std.hash.autoHash(hasher, std.builtin.TypeId.Type), - .comptime_int => std.hash.autoHash(hasher, std.builtin.TypeId.ComptimeInt), - .noreturn => std.hash.autoHash(hasher, std.builtin.TypeId.NoReturn), - .null => std.hash.autoHash(hasher, std.builtin.TypeId.Null), - .undefined => std.hash.autoHash(hasher, std.builtin.TypeId.Undefined), - - .anyopaque => { - std.hash.autoHash(hasher, std.builtin.TypeId.Opaque); - std.hash.autoHash(hasher, Tag.anyopaque); - }, - - .@"anyframe" => { - std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - std.hash.autoHash(hasher, Tag.@"anyframe"); - }, - - .enum_literal => { - std.hash.autoHash(hasher, std.builtin.TypeId.EnumLiteral); - std.hash.autoHash(hasher, Tag.enum_literal); - }, - .u1, .u8, .i8, @@ -1046,12 +957,6 @@ pub const Type = struct { for (names) |name| hasher.update(name); }, - .anyerror => { - // anyerror is distinct from other error sets - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.anyerror); - }, - .error_set_inferred => { // inferred error sets are compared using their data pointer const ies: *Module.Fn.InferredErrorSet = ty.castTag(.error_set_inferred).?.data; @@ -1209,12 +1114,6 @@ pub const Type = struct { } }, - // we can't hash these based on tags because they wouldn't match the expanded version. - .prefetch_options, - .export_options, - .extern_options, - => unreachable, // needed to resolve the type before now - .enum_full, .enum_nonexhaustive => { const enum_obj: *const Module.EnumFull = ty.cast(Payload.EnumFull).?.data; std.hash.autoHash(hasher, std.builtin.TypeId.Enum); @@ -1230,24 +1129,12 @@ pub const Type = struct { std.hash.autoHash(hasher, std.builtin.TypeId.Enum); std.hash.autoHash(hasher, enum_obj); }, - // we can't hash these based on tags because they wouldn't match the expanded version. - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - => unreachable, // needed to resolve the type before now .@"union", .union_safety_tagged, .union_tagged => { const union_obj: *const Module.Union = ty.cast(Payload.Union).?.data; std.hash.autoHash(hasher, std.builtin.TypeId.Union); std.hash.autoHash(hasher, union_obj); }, - // we can't hash these based on tags because they wouldn't match the expanded version. 
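[Note: the `eql` and `hashWithHasher` arms removed here all guarded builtin types that can no longer reach this code. Once a type is interned it is identified by its index, so structural equality and hashing reduce to operations on that index. A toy map context showing the identity semantics this buys; the names and index values are illustrative only:]

    const std = @import("std");

    const TypeIndex = enum(u32) { bool_type, void_type, _ };

    // With one index per distinct type, the map context needs no recursive
    // structural hashing or per-tag comparison.
    const Ctx = struct {
        pub fn hash(_: Ctx, t: TypeIndex) u64 {
            return std.hash.Wyhash.hash(0, std.mem.asBytes(&t));
        }
        pub fn eql(_: Ctx, a: TypeIndex, b: TypeIndex) bool {
            return a == b; // identity: one index per distinct type
        }
    };

    test "identity semantics for interned types" {
        const ctx = Ctx{};
        try std.testing.expect(ctx.eql(.bool_type, .bool_type));
        try std.testing.expect(!ctx.eql(.bool_type, .void_type));
    }
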
- .type_info => unreachable, // needed to resolve the type before now - } } @@ -1305,19 +1192,9 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .comptime_int, - .noreturn, - .null, - .undefined, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, - .enum_literal, .anyerror_void_error_union, .inferred_alloc_const, .inferred_alloc_mut, @@ -1325,19 +1202,6 @@ pub const Type = struct { .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", - .generic_poison, => unreachable, .array_u8, @@ -1580,20 +1444,8 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .@"anyframe", - .comptime_int, - .noreturn, => return writer.writeAll(@tagName(t)), - .enum_literal => return writer.writeAll("@Type(.EnumLiteral)"), - .null => return writer.writeAll("@Type(.Null)"), - .undefined => return writer.writeAll("@Type(.Undefined)"), - .empty_struct, .empty_struct_literal => return writer.writeAll("struct {}"), .@"struct" => { @@ -1640,17 +1492,6 @@ pub const Type = struct { .manyptr_u8 => return writer.writeAll("[*]u8"), .manyptr_const_u8 => return writer.writeAll("[*]const u8"), .manyptr_const_u8_sentinel_0 => return writer.writeAll("[*:0]const u8"), - .atomic_order => return writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op => return writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention => return writer.writeAll("std.builtin.CallingConvention"), - .address_space => return writer.writeAll("std.builtin.AddressSpace"), - .float_mode => return writer.writeAll("std.builtin.FloatMode"), - .reduce_op => return writer.writeAll("std.builtin.ReduceOp"), - .modifier => return writer.writeAll("std.builtin.CallModifier"), - .prefetch_options => return writer.writeAll("std.builtin.PrefetchOptions"), - .export_options => return writer.writeAll("std.builtin.ExportOptions"), - .extern_options => return writer.writeAll("std.builtin.ExternOptions"), - .type_info => return writer.writeAll("std.builtin.Type"), .function => { const payload = ty.castTag(.function).?.data; try writer.writeAll("fn("); @@ -1889,7 +1730,6 @@ pub const Type = struct { }, .inferred_alloc_const => return writer.writeAll("(inferred_alloc_const)"), .inferred_alloc_mut => return writer.writeAll("(inferred_alloc_mut)"), - .generic_poison => return writer.writeAll("(generic poison)"), } unreachable; } @@ -1931,20 +1771,6 @@ pub const Type = struct { switch (t) { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, - - // TODO get rid of these Type.Tag values. 
- .atomic_order => unreachable, - .atomic_rmw_op => unreachable, - .calling_convention => unreachable, - .address_space => unreachable, - .float_mode => unreachable, - .reduce_op => unreachable, - .modifier => unreachable, - .prefetch_options => unreachable, - .export_options => unreachable, - .extern_options => unreachable, - .type_info => unreachable, .u1, .u8, @@ -1958,19 +1784,8 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .@"anyframe", - .comptime_int, - .noreturn, => try writer.writeAll(@tagName(t)), - .enum_literal => try writer.writeAll("@TypeOf(.enum_literal)"), - .null => try writer.writeAll("@TypeOf(null)"), - .undefined => try writer.writeAll("@TypeOf(undefined)"), .empty_struct_literal => try writer.writeAll("@TypeOf(.{})"), .empty_struct => { @@ -2249,34 +2064,12 @@ pub const Type = struct { .i32 => return Value.initTag(.i32_type), .u64 => return Value.initTag(.u64_type), .i64 => return Value.initTag(.i64_type), - .anyopaque => return Value.initTag(.anyopaque_type), - .bool => return Value.initTag(.bool_type), - .void => return Value.initTag(.void_type), - .type => return Value.initTag(.type_type), - .anyerror => return Value.initTag(.anyerror_type), - .@"anyframe" => return Value.initTag(.anyframe_type), - .comptime_int => return Value.initTag(.comptime_int_type), - .noreturn => return Value.initTag(.noreturn_type), - .null => return Value.initTag(.null_type), - .undefined => return Value.initTag(.undefined_type), .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), .const_slice_u8 => return Value.initTag(.const_slice_u8_type), .const_slice_u8_sentinel_0 => return Value.initTag(.const_slice_u8_sentinel_0_type), - .enum_literal => return Value.initTag(.enum_literal_type), .manyptr_u8 => return Value.initTag(.manyptr_u8_type), .manyptr_const_u8 => return Value.initTag(.manyptr_const_u8_type), .manyptr_const_u8_sentinel_0 => return Value.initTag(.manyptr_const_u8_sentinel_0_type), - .atomic_order => return Value.initTag(.atomic_order_type), - .atomic_rmw_op => return Value.initTag(.atomic_rmw_op_type), - .calling_convention => return Value.initTag(.calling_convention_type), - .address_space => return Value.initTag(.address_space_type), - .float_mode => return Value.initTag(.float_mode_type), - .reduce_op => return Value.initTag(.reduce_op_type), - .modifier => return Value.initTag(.modifier_type), - .prefetch_options => return Value.initTag(.prefetch_options_type), - .export_options => return Value.initTag(.export_options_type), - .extern_options => return Value.initTag(.extern_options_type), - .type_info => return Value.initTag(.type_info_type), .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, else => return Value.Tag.ty.create(allocator, self), @@ -2378,8 +2171,7 @@ pub const Type = struct { .i64, .u128, .i128, - .bool, - .anyerror, + .const_slice_u8, .const_slice_u8_sentinel_0, .array_u8_sentinel_0, @@ -2388,18 +2180,7 @@ pub const Type = struct { .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .@"anyframe", - .anyopaque, + .@"opaque", .error_set_single, .error_union, @@ -2435,16 +2216,8 @@ pub const Type = struct { // These are false because they are comptime-only types. 
.single_const_pointer_to_comptime_int, - .void, - .type, - .comptime_int, - .noreturn, - .null, - .undefined, - .enum_literal, .empty_struct, .empty_struct_literal, - .type_info, // These are function *bodies*, not pointers. // Special exceptions have to be made when emitting functions due to // this returning false. @@ -2558,7 +2331,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, } } @@ -2641,8 +2413,7 @@ pub const Type = struct { .i64, .u128, .i128, - .bool, - .void, + .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -2662,32 +2433,11 @@ pub const Type = struct { .optional_single_const_pointer, => true, - .anyopaque, - .anyerror, - .noreturn, - .null, - .@"anyframe", - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, .error_set, .error_set_single, .error_set_inferred, .error_set_merged, .@"opaque", - .generic_poison, - .type, - .comptime_int, - .enum_literal, - .type_info, // These are function bodies, not function pointers. .function, .const_slice_u8, @@ -2773,7 +2523,6 @@ pub const Type = struct { else => return false, @enumToInt(InternPool.Index.none) => switch (ty.tag()) { - .noreturn => return true, .error_set => { const err_set_obj = ty.castTag(.error_set).?.data; const names = err_set_obj.names.keys(); @@ -3003,21 +2752,10 @@ pub const Type = struct { .u1, .u8, .i8, - .bool, + .array_u8_sentinel_0, .array_u8, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, .@"opaque", - .anyopaque, => return AbiAlignmentAdvanced{ .scalar = 1 }, // represents machine code; not a pointer @@ -3044,13 +2782,11 @@ pub const Type = struct { .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .@"anyframe", .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, - .anyerror, .error_set_inferred, .error_set_single, .error_set, @@ -3229,22 +2965,12 @@ pub const Type = struct { }, .empty_struct, - .void, .empty_struct_literal, - .type, - .comptime_int, - .null, - .undefined, - .enum_literal, - .type_info, => return AbiAlignmentAdvanced{ .scalar = 0 }, - .noreturn, .inferred_alloc_const, .inferred_alloc_mut, => unreachable, - - .generic_poison => unreachable, } } @@ -3422,26 +3148,12 @@ pub const Type = struct { switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer .@"opaque" => unreachable, // no size available - .noreturn => unreachable, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, - .modifier => unreachable, // missing call to resolveTypeFields - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - .type_info => unreachable, // missing call to resolveTypeFields - - .anyopaque, - .type, - .comptime_int, - .null, - .undefined, - .enum_literal, + .single_const_pointer_to_comptime_int, .empty_struct_literal, .empty_struct, - .void, => return AbiSizeAdvanced{ .scalar = 0 }, .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { @@ 
-3496,13 +3208,6 @@ pub const Type = struct { .u1, .u8, .i8, - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, => return AbiSizeAdvanced{ .scalar = 1 }, .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data }, @@ -3552,7 +3257,6 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = result }; }, - .@"anyframe", .anyframe_T, .optional_single_const_pointer, .optional_single_mut_pointer, @@ -3580,7 +3284,6 @@ pub const Type = struct { // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, - .anyerror, .error_set_inferred, .error_set, .error_set_merged, @@ -3758,6 +3461,7 @@ pub const Type = struct { .undefined => unreachable, .enum_literal => unreachable, .generic_poison => unreachable, + .var_args_param => unreachable, .atomic_order => unreachable, // missing call to resolveTypeFields .atomic_rmw_op => unreachable, // missing call to resolveTypeFields @@ -3770,7 +3474,6 @@ pub const Type = struct { .export_options => unreachable, // missing call to resolveTypeFields .extern_options => unreachable, // missing call to resolveTypeFields .type_info => unreachable, // missing call to resolveTypeFields - .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -3784,23 +3487,14 @@ pub const Type = struct { switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, .single_const_pointer_to_comptime_int => unreachable, .empty_struct => unreachable, .empty_struct_literal => unreachable, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, .@"opaque" => unreachable, - .generic_poison => unreachable, - .void => return 0, - .bool, .u1 => return 1, + .u1 => return 1, .u8, .i8 => return 8, .i16, .u16 => return 16, .u29 => return 29, @@ -3875,9 +3569,7 @@ pub const Type = struct { return payload.len * 8 * elem_size + elem_bit_size; }, - .@"anyframe", - .anyframe_T, - => return target.ptrBitWidth(), + .anyframe_T => return target.ptrBitWidth(), .const_slice, .mut_slice, @@ -3916,7 +3608,6 @@ pub const Type = struct { .error_set, .error_set_single, .anyerror_void_error_union, - .anyerror, .error_set_inferred, .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type @@ -3926,19 +3617,6 @@ pub const Type = struct { // includes padding bits. 
return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; }, - - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => @panic("TODO at some point we gotta resolve builtin types"), } } @@ -4326,7 +4004,7 @@ pub const Type = struct { .manyptr_const_u8_sentinel_0, => Type.u8, - .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), + .single_const_pointer_to_comptime_int => Type.comptime_int, .pointer => ty.castTag(.pointer).?.data.pointee_type, else => unreachable, @@ -4372,7 +4050,7 @@ pub const Type = struct { .manyptr_const_u8_sentinel_0, => Type.u8, - .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), + .single_const_pointer_to_comptime_int => Type.comptime_int, .pointer => { const info = ty.castTag(.pointer).?.data; const child_ty = info.pointee_type; @@ -4387,7 +4065,6 @@ pub const Type = struct { .optional_single_const_pointer => ty.castPointer().?.data, .anyframe_T => ty.castTag(.anyframe_T).?.data, - .@"anyframe" => Type.void, else => unreachable, }; @@ -4468,19 +4145,6 @@ pub const Type = struct { return union_obj.tag_ty; }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // needed to call resolveTypeFields first - else => null, }; } @@ -4495,19 +4159,6 @@ pub const Type = struct { return union_obj.tag_ty; }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // needed to call resolveTypeFields first - else => null, }; } @@ -4572,7 +4223,7 @@ pub const Type = struct { /// Asserts that the type is an error union. pub fn errorUnionPayload(self: Type) Type { return switch (self.tag()) { - .anyerror_void_error_union => Type.initTag(.void), + .anyerror_void_error_union => Type.void, .error_union => self.castTag(.error_union).?.data.payload, else => unreachable, }; @@ -4580,33 +4231,38 @@ pub const Type = struct { pub fn errorUnionSet(self: Type) Type { return switch (self.tag()) { - .anyerror_void_error_union => Type.initTag(.anyerror), + .anyerror_void_error_union => Type.anyerror, .error_union => self.castTag(.error_union).?.data.error_set, else => unreachable, }; } /// Returns false for unresolved inferred error sets. - pub fn errorSetIsEmpty(ty: Type) bool { - switch (ty.tag()) { - .anyerror => return false, - .error_set_inferred => { - const inferred_error_set = ty.castTag(.error_set_inferred).?.data; - // Can't know for sure. - if (!inferred_error_set.is_resolved) return false; - if (inferred_error_set.is_anyerror) return false; - return inferred_error_set.errors.count() == 0; - }, - .error_set_single => return false, - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - return err_set_obj.names.count() == 0; + pub fn errorSetIsEmpty(ty: Type, mod: *const Module) bool { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .error_set_inferred => { + const inferred_error_set = ty.castTag(.error_set_inferred).?.data; + // Can't know for sure. 
+ if (!inferred_error_set.is_resolved) return false; + if (inferred_error_set.is_anyerror) return false; + return inferred_error_set.errors.count() == 0; + }, + .error_set_single => return false, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + return err_set_obj.names.count() == 0; + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + return name_map.count() == 0; + }, + else => unreachable, }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - return name_map.count() == 0; + .anyerror_type => return false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => @panic("TODO"), }, - else => unreachable, } } @@ -4614,9 +4270,13 @@ pub const Type = struct { /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. pub fn isAnyError(ty: Type) bool { - return switch (ty.tag()) { - .anyerror => true, - .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror, + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror, + else => false, + }, + .anyerror_type => true, + // TODO handle error_set_inferred here else => false, }; } @@ -4788,72 +4448,75 @@ pub const Type = struct { const target = mod.getTarget(); var ty = starting_ty; - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type, - .ptr_type => unreachable, - .array_type => unreachable, - .vector_type => @panic("TODO"), - .optional_type => unreachable, - .error_union_type => unreachable, - .simple_type => |t| switch (t) { - .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, - .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, - .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, - .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, - .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, - .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, - .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, - .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, - .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, - .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, - .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, - else => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; + while (true) switch (ty.ip_index) { + .none => switch (ty.tag()) { + .u1 => return .{ .signedness = .unsigned, .bits = 1 }, + .u8 => return .{ .signedness = .unsigned, .bits = 8 }, + .i8 => return .{ .signedness = .signed, .bits = 8 }, + .u16 => return .{ .signedness = .unsigned, .bits = 16 }, + .i16 => return .{ .signedness = .signed, .bits = 16 }, + .u29 => return .{ .signedness = .unsigned, .bits = 29 }, + .u32 => return .{ .signedness = .unsigned, .bits = 32 }, + .i32 => return .{ .signedness = .signed, .bits = 32 }, + .u64 => return .{ .signedness = .unsigned, .bits = 64 }, + .i64 => return .{ .signedness = 
.signed, .bits = 64 }, + .u128 => return .{ .signedness = .unsigned, .bits = 128 }, + .i128 => return .{ .signedness = .signed, .bits = 128 }, + + .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, + .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, + .enum_simple => { + const enum_obj = ty.castTag(.enum_simple).?.data; + const field_count = enum_obj.fields.count(); + if (field_count == 0) return .{ .signedness = .unsigned, .bits = 0 }; + return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) }; + }, - while (true) switch (ty.tag()) { - .u1 => return .{ .signedness = .unsigned, .bits = 1 }, - .u8 => return .{ .signedness = .unsigned, .bits = 8 }, - .i8 => return .{ .signedness = .signed, .bits = 8 }, - .u16 => return .{ .signedness = .unsigned, .bits = 16 }, - .i16 => return .{ .signedness = .signed, .bits = 16 }, - .u29 => return .{ .signedness = .unsigned, .bits = 29 }, - .u32 => return .{ .signedness = .unsigned, .bits = 32 }, - .i32 => return .{ .signedness = .signed, .bits = 32 }, - .u64 => return .{ .signedness = .unsigned, .bits = 64 }, - .i64 => return .{ .signedness = .signed, .bits = 64 }, - .u128 => return .{ .signedness = .unsigned, .bits = 128 }, - .i128 => return .{ .signedness = .signed, .bits = 128 }, - - .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, - .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, - .enum_simple => { - const enum_obj = ty.castTag(.enum_simple).?.data; - const field_count = enum_obj.fields.count(); - if (field_count == 0) return .{ .signedness = .unsigned, .bits = 0 }; - return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) }; - }, + .error_set, .error_set_single, .error_set_inferred, .error_set_merged => { + // TODO revisit this when error sets support custom int types + return .{ .signedness = .unsigned, .bits = 16 }; + }, + + .vector => ty = ty.castTag(.vector).?.data.elem_type, + + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + assert(struct_obj.layout == .Packed); + ty = struct_obj.backing_int_ty; + }, - .error_set, .error_set_single, .anyerror, .error_set_inferred, .error_set_merged => { + else => unreachable, + }, + .anyerror_type => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; }, - - .vector => ty = ty.castTag(.vector).?.data.elem_type, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout == .Packed); - ty = struct_obj.backing_int_ty; + .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, + .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong_type => 
return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type, + .ptr_type => unreachable, + .array_type => unreachable, + .vector_type => @panic("TODO"), + .optional_type => unreachable, + .error_union_type => unreachable, + .simple_type => unreachable, // handled via Index enum tag above + .struct_type => @panic("TODO"), + .union_type => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, }, - - else => unreachable, }; } @@ -5021,7 +4684,6 @@ pub const Type = struct { else => false, }; return switch (ty.tag()) { - .comptime_int, .u1, .u8, .i8, @@ -5114,7 +4776,6 @@ pub const Type = struct { }; while (true) switch (ty.tag()) { - .comptime_int, .u1, .u8, .i8, @@ -5127,9 +4788,7 @@ pub const Type = struct { .i64, .u128, .i128, - .bool, - .type, - .anyerror, + .error_union, .error_set_single, .error_set, @@ -5142,28 +4801,14 @@ pub const Type = struct { .const_slice_u8_sentinel_0, .const_slice, .mut_slice, - .anyopaque, .optional_single_mut_pointer, .optional_single_const_pointer, - .enum_literal, .anyerror_void_error_union, .error_set_inferred, .@"opaque", .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", .anyframe_T, .many_const_pointer, .many_mut_pointer, @@ -5258,10 +4903,6 @@ pub const Type = struct { }, .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .void => return Value.initTag(.void_value), - .noreturn => return Value.initTag(.unreachable_value), - .null => return Value.initTag(.null_value), - .undefined => return Value.initTag(.undef), .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) @@ -5273,7 +4914,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, }; } @@ -5358,22 +4998,7 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, + .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -5387,21 +5012,14 @@ pub const Type = struct { .error_set_inferred, .error_set_merged, .@"opaque", - .generic_poison, .array_u8, .array_u8_sentinel_0, .enum_simple, => false, .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .enum_literal, - .type_info, // These are function bodies, not function pointers. 
.function, - .null, - .undefined, => true, .inferred_alloc_mut => unreachable, @@ -5701,17 +5319,6 @@ pub const Type = struct { .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.fields, .enum_simple => ty.castTag(.enum_simple).?.data.fields, .enum_numbered => ty.castTag(.enum_numbered).?.data.fields, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - => @panic("TODO resolve std.builtin types"), else => unreachable, }; } @@ -5779,17 +5386,6 @@ pub const Type = struct { const tag_ty = mod.intType(.unsigned, bits) catch @panic("TODO: handle OOM here"); return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod); }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - => @panic("TODO resolve std.builtin types"), else => unreachable, } } @@ -6102,18 +5698,6 @@ pub const Type = struct { const opaque_obj = ty.cast(Payload.Opaque).?.data; return opaque_obj.srcLoc(mod); }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // needed to call resolveTypeFields first else => return null, } @@ -6150,29 +5734,17 @@ pub const Type = struct { const opaque_obj = ty.cast(Payload.Opaque).?.data; return opaque_obj.owner_decl; }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // These need to be resolved earlier. else => return null, } } pub fn isGenericPoison(ty: Type) bool { - return switch (ty.ip_index) { - .generic_poison_type => true, - .none => ty.tag() == .generic_poison, - else => false, - }; + return ty.ip_index == .generic_poison_type; + } + + pub fn isVarArgsParam(ty: Type) bool { + return ty.ip_index == .none and ty.tag() == .var_args_param; } /// This enum does not directly correspond to `std.builtin.TypeId` because @@ -6195,28 +5767,7 @@ pub const Type = struct { i64, u128, i128, - anyopaque, - bool, - void, - type, - anyerror, - comptime_int, - noreturn, - @"anyframe", - null, - undefined, - enum_literal, - atomic_order, - atomic_rmw_op, - calling_convention, - address_space, - float_mode, - reduce_op, - modifier, - prefetch_options, - export_options, - extern_options, - type_info, + manyptr_u8, manyptr_const_u8, manyptr_const_u8_sentinel_0, @@ -6224,7 +5775,6 @@ pub const Type = struct { const_slice_u8, const_slice_u8_sentinel_0, anyerror_void_error_union, - generic_poison, /// Same as `empty_struct` except it has an empty namespace. 
empty_struct_literal, /// This is a special value that tracks a set of types that have been stored @@ -6292,39 +5842,17 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .comptime_int, - .noreturn, - .enum_literal, - .null, - .undefined, + .single_const_pointer_to_comptime_int, .anyerror_void_error_union, .const_slice_u8, .const_slice_u8_sentinel_0, - .generic_poison, .inferred_alloc_const, .inferred_alloc_mut, .empty_struct_literal, .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), .array_u8, @@ -6674,18 +6202,19 @@ pub const Type = struct { pub const @"f80": Type = .{ .ip_index = .f80_type, .legacy = undefined }; pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined }; - pub const @"bool" = initTag(.bool); + pub const @"bool": Type = .{ .ip_index = .bool_type, .legacy = undefined }; pub const @"usize": Type = .{ .ip_index = .usize_type, .legacy = undefined }; pub const @"isize": Type = .{ .ip_index = .isize_type, .legacy = undefined }; pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type, .legacy = undefined }; pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined }; - pub const @"void" = initTag(.void); - pub const @"type" = initTag(.type); - pub const @"anyerror" = initTag(.anyerror); - pub const @"anyopaque" = initTag(.anyopaque); - pub const @"null" = initTag(.null); - pub const @"undefined" = initTag(.undefined); - pub const @"noreturn" = initTag(.noreturn); + pub const @"void": Type = .{ .ip_index = .void_type, .legacy = undefined }; + pub const @"type": Type = .{ .ip_index = .type_type, .legacy = undefined }; + pub const @"anyerror": Type = .{ .ip_index = .anyerror_type, .legacy = undefined }; + pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type, .legacy = undefined }; + pub const @"anyframe": Type = .{ .ip_index = .anyframe_type, .legacy = undefined }; + pub const @"null": Type = .{ .ip_index = .null_type, .legacy = undefined }; + pub const @"undefined": Type = .{ .ip_index = .undefined_type, .legacy = undefined }; + pub const @"noreturn": Type = .{ .ip_index = .noreturn_type, .legacy = undefined }; pub const @"c_char": Type = .{ .ip_index = .c_char_type, .legacy = undefined }; pub const @"c_short": Type = .{ .ip_index = .c_short_type, .legacy = undefined }; @@ -6698,6 +6227,8 @@ pub const Type = struct { pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; + pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined }; + pub const err_int = Type.u16; pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { diff --git a/src/value.zig b/src/value.zig index b0484dfc7685..2f9f395017bd 100644 --- a/src/value.zig +++ b/src/value.zig @@ -991,26 +991,26 @@ pub const Value = struct { .null_type => Type.null, .undefined_type => Type.undefined, .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), - .anyframe_type => Type.initTag(.@"anyframe"), + .anyframe_type => Type.@"anyframe", .const_slice_u8_type => Type.initTag(.const_slice_u8), 
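[Note: the `pub const` block above and the `toType` arms around this point both construct the transitional dual representation: a `Type` is an `ip_index` plus a `legacy` field that is only meaningful when `ip_index == .none`. A reduced sketch of that layout and its dispatch rule; the two field names mirror the hunks, everything else is illustrative:]

    const std = @import("std");

    const IpIndex = enum(u32) { none, bool_type, noreturn_type };
    const LegacyTag = enum { error_set_single, function };

    // Reduced model of the transitional Type: interned types carry an index
    // and leave `legacy` undefined; legacy types set `ip_index = .none`.
    const Ty = struct {
        ip_index: IpIndex,
        legacy: LegacyTag,

        pub const @"bool": Ty = .{ .ip_index = .bool_type, .legacy = undefined };

        fn isInterned(ty: Ty) bool {
            return ty.ip_index != .none;
        }
    };

    test "interned constants never touch the legacy payload" {
        try std.testing.expect(Ty.@"bool".isInterned());
    }
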
.const_slice_u8_sentinel_0_type => Type.initTag(.const_slice_u8_sentinel_0), .anyerror_void_error_union_type => Type.initTag(.anyerror_void_error_union), - .generic_poison_type => Type.initTag(.generic_poison), - .enum_literal_type => Type.initTag(.enum_literal), + .generic_poison_type => .{ .ip_index = .generic_poison_type, .legacy = undefined }, + .enum_literal_type => .{ .ip_index = .enum_literal_type, .legacy = undefined }, .manyptr_u8_type => Type.initTag(.manyptr_u8), .manyptr_const_u8_type => Type.initTag(.manyptr_const_u8), .manyptr_const_u8_sentinel_0_type => Type.initTag(.manyptr_const_u8_sentinel_0), - .atomic_order_type => Type.initTag(.atomic_order), - .atomic_rmw_op_type => Type.initTag(.atomic_rmw_op), - .calling_convention_type => Type.initTag(.calling_convention), - .address_space_type => Type.initTag(.address_space), - .float_mode_type => Type.initTag(.float_mode), - .reduce_op_type => Type.initTag(.reduce_op), - .modifier_type => Type.initTag(.modifier), - .prefetch_options_type => Type.initTag(.prefetch_options), - .export_options_type => Type.initTag(.export_options), - .extern_options_type => Type.initTag(.extern_options), - .type_info_type => Type.initTag(.type_info), + .atomic_order_type => .{ .ip_index = .atomic_order_type, .legacy = undefined }, + .atomic_rmw_op_type => .{ .ip_index = .atomic_rmw_op_type, .legacy = undefined }, + .calling_convention_type => .{ .ip_index = .calling_convention_type, .legacy = undefined }, + .address_space_type => .{ .ip_index = .address_space_type, .legacy = undefined }, + .float_mode_type => .{ .ip_index = .float_mode_type, .legacy = undefined }, + .reduce_op_type => .{ .ip_index = .reduce_op_type, .legacy = undefined }, + .modifier_type => .{ .ip_index = .call_modifier_type, .legacy = undefined }, + .prefetch_options_type => .{ .ip_index = .prefetch_options_type, .legacy = undefined }, + .export_options_type => .{ .ip_index = .export_options_type, .legacy = undefined }, + .extern_options_type => .{ .ip_index = .extern_options_type, .legacy = undefined }, + .type_info_type => .{ .ip_index = .type_info_type, .legacy = undefined }, else => unreachable, }; From ca3cf93b21bc77535fbaa7ca6aa411654dcfe069 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 19:12:53 -0700 Subject: [PATCH 015/205] stage2: move most simple values to InternPool --- src/Air.zig | 3 + src/InternPool.zig | 31 ++ src/Module.zig | 50 ++- src/Sema.zig | 134 +++--- src/TypedValue.zig | 57 --- src/Zir.zig | 3 + src/codegen.zig | 4 +- src/codegen/c.zig | 2 +- src/codegen/llvm.zig | 4 +- src/codegen/spirv.zig | 6 +- src/type.zig | 32 +- src/value.zig | 938 ++++++++++++++++-------------------------- 12 files changed, 521 insertions(+), 743 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 1bc9d949e298..4124788605b0 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -899,8 +899,10 @@ pub const Inst = struct { type_info_type = @enumToInt(InternPool.Index.type_info_type), manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), + const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), anyerror_void_error_union_type = 
@enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), @@ -908,6 +910,7 @@ pub const Inst = struct { undef = @enumToInt(InternPool.Index.undef), zero = @enumToInt(InternPool.Index.zero), zero_usize = @enumToInt(InternPool.Index.zero_usize), + zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), diff --git a/src/InternPool.zig b/src/InternPool.zig index 146a880493d0..6345d36f261e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -318,8 +318,10 @@ pub const Index = enum(u32) { type_info_type, manyptr_u8_type, manyptr_const_u8_type, + manyptr_const_u8_sentinel_0_type, single_const_pointer_to_comptime_int_type, const_slice_u8_type, + const_slice_u8_sentinel_0_type, anyerror_void_error_union_type, generic_poison_type, var_args_param_type, @@ -331,6 +333,8 @@ pub const Index = enum(u32) { zero, /// `0` (usize) zero_usize, + /// `0` (u8) + zero_u8, /// `1` (comptime_int) one, /// `1` (usize) @@ -489,24 +493,43 @@ pub const static_keys = [_]Key{ .size = .Many, } }, + // manyptr_const_u8_type .{ .ptr_type = .{ .elem_type = .u8_type, .size = .Many, .is_const = true, } }, + // manyptr_const_u8_sentinel_0_type + .{ .ptr_type = .{ + .elem_type = .u8_type, + .sentinel = .zero_u8, + .size = .Many, + .is_const = true, + } }, + .{ .ptr_type = .{ .elem_type = .comptime_int_type, .size = .One, .is_const = true, } }, + // const_slice_u8_type .{ .ptr_type = .{ .elem_type = .u8_type, .size = .Slice, .is_const = true, } }, + // const_slice_u8_sentinel_0_type + .{ .ptr_type = .{ + .elem_type = .u8_type, + .sentinel = .zero_u8, + .size = .Slice, + .is_const = true, + } }, + + // anyerror_void_error_union_type .{ .error_union_type = .{ .error_set_type = .anyerror_type, .payload_type = .void_type, @@ -541,6 +564,14 @@ pub const static_keys = [_]Key{ }, } }, + .{ .int = .{ + .ty = .u8_type, + .big_int = .{ + .limbs = &.{0}, + .positive = true, + }, + } }, + .{ .int = .{ .ty = .comptime_int_type, .big_int = .{ diff --git a/src/Module.zig b/src/Module.zig index 77c20fbcc668..5c84b123c121 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4847,31 +4847,37 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.owns_tv = false; var queue_linker_work = false; var is_extern = false; - switch (decl_tv.val.tag()) { - .variable => { - const variable = decl_tv.val.castTag(.variable).?.data; - if (variable.owner_decl == decl_index) { - decl.owns_tv = true; - queue_linker_work = true; - - const copied_init = try variable.init.copy(decl_arena_allocator); - variable.init = copied_init; - } - }, - .extern_fn => { - const extern_fn = decl_tv.val.castTag(.extern_fn).?.data; - if (extern_fn.owner_decl == decl_index) { - decl.owns_tv = true; - queue_linker_work = true; - is_extern = true; - } - }, - + switch (decl_tv.val.ip_index) { .generic_poison => unreachable, - .unreachable_value => unreachable, + .none => switch (decl_tv.val.tag()) { + .variable => { + const variable = decl_tv.val.castTag(.variable).?.data; + if (variable.owner_decl == decl_index) { + decl.owns_tv = true; + queue_linker_work = true; + + const copied_init = try variable.init.copy(decl_arena_allocator); + variable.init = copied_init; + } + }, + .extern_fn => { + const extern_fn = decl_tv.val.castTag(.extern_fn).?.data; + if 
(extern_fn.owner_decl == decl_index) { + decl.owns_tv = true; + queue_linker_work = true; + is_extern = true; + } + }, + + .unreachable_value => unreachable, - .function => {}, + .function => {}, + else => { + log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); + queue_linker_work = true; + }, + }, else => { log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); queue_linker_work = true; diff --git a/src/Sema.zig b/src/Sema.zig index ea8258717b5f..65475104aae0 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1569,6 +1569,7 @@ fn analyzeBodyInner( }, .condbr => blk: { if (!block.is_comptime) break sema.zirCondbr(block, inst); + const mod = sema.mod; // Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220 const inst_data = datas[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; @@ -1579,7 +1580,7 @@ fn analyzeBodyInner( if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - const inline_body = if (cond.val.toBool()) then_body else else_body; + const inline_body = if (cond.val.toBool(mod)) then_body else else_body; try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src); const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1591,6 +1592,7 @@ fn analyzeBodyInner( } }, .condbr_inline => blk: { + const mod = sema.mod; const inst_data = datas[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); @@ -1600,7 +1602,7 @@ fn analyzeBodyInner( if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - const inline_body = if (cond.val.toBool()) then_body else else_body; + const inline_body = if (cond.val.toBool(mod)) then_body else else_body; try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src); const old_runtime_index = block.runtime_index; @@ -1634,7 +1636,7 @@ fn analyzeBodyInner( if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_val.toBool()) { + if (is_non_err_val.toBool(mod)) { break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1647,6 +1649,7 @@ fn analyzeBodyInner( }, .try_ptr => blk: { if (!block.is_comptime) break :blk try sema.zirTryPtr(block, inst); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -1660,7 +1663,7 @@ fn analyzeBodyInner( if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_val.toBool()) { + if (is_non_err_val.toBool(mod)) { break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1741,11 +1744,12 @@ fn resolveConstBool( zir_ref: Zir.Inst.Ref, reason: []const u8, ) !bool { + const mod = sema.mod; const air_inst = try sema.resolveInst(zir_ref); const wanted_type = Type.bool; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const 
val = try sema.resolveConstValue(block, src, coerced_inst, reason); - return val.toBool(); + return val.toBool(mod); } pub fn resolveConstString( @@ -1843,9 +1847,12 @@ fn resolveConstMaybeUndefVal( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| { - switch (val.tag()) { - .variable => return sema.failWithNeededComptime(block, src, reason), + switch (val.ip_index) { .generic_poison => return error.GenericPoison, + .none => switch (val.tag()) { + .variable => return sema.failWithNeededComptime(block, src, reason), + else => return val, + }, else => return val, } } @@ -1862,10 +1869,13 @@ fn resolveConstValue( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { - switch (val.tag()) { - .undef => return sema.failWithUseOfUndef(block, src), - .variable => return sema.failWithNeededComptime(block, src, reason), + switch (val.ip_index) { .generic_poison => return error.GenericPoison, + .none => switch (val.tag()) { + .undef => return sema.failWithUseOfUndef(block, src), + .variable => return sema.failWithNeededComptime(block, src, reason), + else => return val, + }, else => return val, } } @@ -1900,12 +1910,11 @@ fn resolveMaybeUndefVal( const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; switch (val.ip_index) { .generic_poison => return error.GenericPoison, - else => return val, .none => switch (val.tag()) { .variable => return null, - .generic_poison => return error.GenericPoison, else => return val, }, + else => return val, } } @@ -1919,12 +1928,18 @@ fn resolveMaybeUndefValIntable( ) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; var check = val; - while (true) switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, + while (true) switch (check.ip_index) { .generic_poison => return error.GenericPoison, + .none => switch (check.tag()) { + .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null, + .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, + .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, + .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, + else => { + try sema.resolveLazyValue(val); + return val; + }, + }, else => { try sema.resolveLazyValue(val); return val; @@ -6208,12 +6223,13 @@ fn popErrorReturnTrace( operand: Air.Inst.Ref, saved_error_trace_index: Air.Inst.Ref, ) CompileError!void { + const mod = sema.mod; var is_non_error: ?bool = null; var is_non_error_inst: Air.Inst.Ref = undefined; if (operand != .none) { is_non_error_inst = try sema.analyzeIsNonErr(block, src, operand); if (try sema.resolveDefinedValue(block, src, is_non_error_inst)) |cond_val| - is_non_error = cond_val.toBool(); + is_non_error = cond_val.toBool(mod); } else is_non_error = true; // no operand means pop unconditionally if (is_non_error == true) { @@ -7222,7 +7238,7 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.tag()) { + switch (arg_val.ip_index) { .generic_poison, 
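
The resolve helpers above all adopt the same two-level dispatch: interned indices are classified first, and only `.none` falls through to the old legacy tag switch. A small sketch of that shape under toy types (the error names mirror the patch; everything else is illustrative):

    const std = @import("std");

    const Index = enum(u32) { none, generic_poison, undef, _ };
    const Tag = enum { variable, constant };

    const Value = struct { ip_index: Index, legacy_tag: Tag };

    const ResolveError = error{ GenericPoison, UseOfUndef, NeededComptime };

    // Interned cases are handled first; only `.none` consults the legacy
    // tag, mirroring the shape of resolveConstValue in the patch.
    fn resolveConst(val: Value) ResolveError!Value {
        switch (val.ip_index) {
            .generic_poison => return error.GenericPoison,
            .undef => return error.UseOfUndef,
            .none => switch (val.legacy_tag) {
                .variable => return error.NeededComptime,
                else => return val,
            },
            else => return val,
        }
    }

    test "interned cases are classified before legacy tags" {
        const poison = Value{ .ip_index = .generic_poison, .legacy_tag = .variable };
        try std.testing.expectError(error.GenericPoison, resolveConst(poison));
        const ok = Value{ .ip_index = .none, .legacy_tag = .constant };
        try std.testing.expectEqual(ok, try resolveConst(ok));
    }
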
.generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. @@ -7261,7 +7277,7 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.tag()) { + switch (arg_val.ip_index) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. @@ -7443,13 +7459,13 @@ fn instantiateGenericCall( arg_ty.hashWithHasher(&hasher, mod); generic_args[i] = .{ .ty = arg_ty, - .val = Value.initTag(.generic_poison), + .val = Value.generic_poison, .is_anytype = true, }; } else { generic_args[i] = .{ .ty = arg_ty, - .val = Value.initTag(.generic_poison), + .val = Value.generic_poison, .is_anytype = false, }; } @@ -7815,7 +7831,7 @@ fn resolveGenericInstantiationType( } else { child_sema.comptime_args[arg_i] = .{ .ty = copied_arg_ty, - .val = Value.initTag(.generic_poison), + .val = Value.generic_poison, }; } @@ -8780,9 +8796,9 @@ fn resolveGenericBody( switch (err) { error.GenericPoison => { if (dest_ty.ip_index == .type_type) { - return Value.initTag(.generic_poison_type); + return Value.generic_poison_type; } else { - return Value.initTag(.generic_poison); + return Value.generic_poison; } }, else => |e| return e, @@ -9394,7 +9410,7 @@ fn zirParam( if (is_comptime) { // If this is a comptime parameter we can add a constant generic_poison // since this is also a generic parameter. - const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison)); + const result = try sema.addConstant(param_ty, Value.generic_poison); sema.inst_map.putAssumeCapacityNoClobber(inst, result); } else { // Otherwise we need a dummy runtime instruction. @@ -11819,8 +11835,9 @@ fn validateSwitchItemBool( src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { + const mod = sema.mod; const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; - if (item_val.toBool()) { + if (item_val.toBool(mod)) { true_count.* += 1; } else { false_count.* += 1; @@ -15462,7 +15479,7 @@ fn cmpSelf( } else { if (resolved_type.zigTypeTag(mod) == .Bool) { // We can lower bool eq/neq more efficiently. 
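
Most of the churn in this commit is mechanical: `toBool` and friends now take the Module, so each affected function first grabs `const mod = sema.mod;` and threads it through. A sketch of why the parameter is needed at all, assuming toy InternPool and Module types: an interned index only means something relative to the pool that owns it.

    const std = @import("std");

    const Index = enum(u32) { none, bool_false, bool_true, _ };

    // Toy pool and module, illustrative only.
    const InternPool = struct {
        fn isTrue(_: InternPool, i: Index) bool {
            return i == .bool_true;
        }
    };
    const Module = struct { intern_pool: InternPool = .{} };

    const Value = struct {
        ip_index: Index,

        // The extra `mod` parameter is the whole point: decoding an
        // interned value requires access to the owning pool.
        fn toBool(val: Value, mod: *const Module) bool {
            return mod.intern_pool.isTrue(val.ip_index);
        }
    };

    test "toBool is resolved against the module's pool" {
        const mod = Module{};
        const t = Value{ .ip_index = .bool_true };
        try std.testing.expect(t.toBool(&mod));
    }
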
- return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src); + return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(mod), rhs_src); } break :src rhs_src; } @@ -15472,7 +15489,7 @@ fn cmpSelf( if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); - return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src); + return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(mod), lhs_src); } } break :src lhs_src; @@ -16815,6 +16832,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; @@ -16824,7 +16842,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (try sema.resolveMaybeUndefVal(operand)) |val| { return if (val.isUndef()) sema.addConstUndef(Type.bool) - else if (val.toBool()) + else if (val.toBool(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true; @@ -16842,6 +16860,7 @@ fn zirBoolBr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const datas = sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; const lhs = try sema.resolveInst(inst_data.lhs); @@ -16851,9 +16870,9 @@ fn zirBoolBr( const gpa = sema.gpa; if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { - if (is_bool_or and lhs_val.toBool()) { + if (is_bool_or and lhs_val.toBool(mod)) { return Air.Inst.Ref.bool_true; - } else if (!is_bool_or and !lhs_val.toBool()) { + } else if (!is_bool_or and !lhs_val.toBool(mod)) { return Air.Inst.Ref.bool_false; } // comptime-known left-hand side. No need for a block here; the result @@ -16897,9 +16916,9 @@ fn zirBoolBr( const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst); if (!sema.typeOf(rhs_result).isNoReturn()) { if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| { - if (is_bool_or and rhs_val.toBool()) { + if (is_bool_or and rhs_val.toBool(mod)) { return Air.Inst.Ref.bool_true; - } else if (!is_bool_or and !rhs_val.toBool()) { + } else if (!is_bool_or and !rhs_val.toBool(mod)) { return Air.Inst.Ref.bool_false; } } @@ -17053,7 +17072,7 @@ fn zirCondbr( const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| { - const body = if (cond_val.toBool()) then_body else else_body; + const body = if (cond_val.toBool(mod)) then_body else else_body; try sema.maybeErrorUnwrapCondbr(parent_block, body, extra.data.condition, cond_src); // We use `analyzeBodyInner` since we want to propagate any possible @@ -17126,7 +17145,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! 
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); if (is_non_err != .none) { const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?; - if (is_non_err_val.toBool()) { + if (is_non_err_val.toBool(mod)) { return sema.analyzeErrUnionPayload(parent_block, src, err_union_ty, err_union, operand_src, false); } // We can analyze the body directly in the parent block because we know there are @@ -17173,7 +17192,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); if (is_non_err != .none) { const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?; - if (is_non_err_val.toBool()) { + if (is_non_err_val.toBool(mod)) { return sema.analyzeErrUnionPayloadPtr(parent_block, src, operand, false, false); } // We can analyze the body directly in the parent block because we know there are @@ -18509,11 +18528,12 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(Type.u1); - if (val.toBool()) return sema.addConstant(Type.u1, Value.one); + if (val.toBool(mod)) return sema.addConstant(Type.u1, Value.one); return sema.addConstant(Type.u1, Value.zero); } return block.addUnOp(.bool_to_int, operand); @@ -18811,12 +18831,12 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const ty = try Type.ptr(sema.arena, mod, .{ .size = ptr_size, - .mutable = !is_const_val.toBool(), - .@"volatile" = is_volatile_val.toBool(), + .mutable = !is_const_val.toBool(mod), + .@"volatile" = is_volatile_val.toBool(mod), .@"align" = abi_align, .@"addrspace" = address_space_val.toEnum(std.builtin.AddressSpace), .pointee_type = try elem_ty.copy(sema.arena), - .@"allowzero" = is_allowzero_val.toBool(), + .@"allowzero" = is_allowzero_val.toBool(mod), .sentinel = actual_sentinel, }); return sema.addType(ty); @@ -18932,7 +18952,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "non-packed struct does not support backing integer type", .{}); } - return try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy, is_tuple_val.toBool()); + return try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy, is_tuple_val.toBool(mod)); }, .Enum => { const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data; @@ -18961,7 +18981,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull); enum_ty_payload.* = .{ .base = .{ - .tag = if (!is_exhaustive_val.toBool()) + .tag = if (!is_exhaustive_val.toBool(mod)) .enum_nonexhaustive else .enum_full, @@ -19295,9 +19315,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // alignment: comptime_int, const alignment_val = struct_val[1]; // is_generic: bool, - const is_generic = struct_val[2].toBool(); + const is_generic = struct_val[2].toBool(mod); // is_var_args: bool, - const is_var_args = 
struct_val[3].toBool(); + const is_var_args = struct_val[3].toBool(mod); // return_type: ?type, const return_type_val = struct_val[4]; // args: []const Param, @@ -19339,9 +19359,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // is_generic: bool, - const arg_is_generic = arg_val[0].toBool(); + const arg_is_generic = arg_val[0].toBool(mod); // is_noalias: bool, - const arg_is_noalias = arg_val[1].toBool(); + const arg_is_noalias = arg_val[1].toBool(mod); // type: ?type, const param_type_opt_val = arg_val[2]; @@ -19455,9 +19475,9 @@ fn reifyStruct( if (layout == .Packed) { if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{}); - if (is_comptime_val.toBool()) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}); + if (is_comptime_val.toBool(mod)) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}); } - if (layout == .Extern and is_comptime_val.toBool()) { + if (layout == .Extern and is_comptime_val.toBool(mod)) { return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}); } @@ -19499,7 +19519,7 @@ fn reifyStruct( opt_val; break :blk try payload_val.copy(new_decl_arena_allocator); } else Value.initTag(.unreachable_value); - if (is_comptime_val.toBool() and default_val.tag() == .unreachable_value) { + if (is_comptime_val.toBool(mod) and default_val.tag() == .unreachable_value) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } @@ -19508,7 +19528,7 @@ fn reifyStruct( .ty = field_ty, .abi_align = abi_align, .default_val = default_val, - .is_comptime = is_comptime_val.toBool(), + .is_comptime = is_comptime_val.toBool(mod), .offset = undefined, }; @@ -21446,7 +21466,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const elems = try sema.gpa.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf); - const should_choose_a = pred_elem_val.toBool(); + const should_choose_a = pred_elem_val.toBool(mod); if (should_choose_a) { elem.* = a_val.elemValueBuffer(sema.mod, i, &buf); } else { @@ -22956,7 +22976,7 @@ fn resolveExternOptions( .name = name, .library_name = library_name, .linkage = linkage, - .is_thread_local = is_thread_local_val.toBool(), + .is_thread_local = is_thread_local_val.toBool(mod), }; } @@ -31760,8 +31780,10 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .enum_literal_type, .manyptr_u8_type, .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, .single_const_pointer_to_comptime_int_type, .const_slice_u8_type, + .const_slice_u8_sentinel_0_type, .anyerror_void_error_union_type, .generic_poison_type, .var_args_param_type, @@ -31771,6 +31793,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .undef => unreachable, .zero => unreachable, .zero_usize => unreachable, + .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, .calling_convention_c => unreachable, @@ -34225,12 +34248,9 @@ fn intFitsInType( switch (val.tag()) { .zero, .undef, - .bool_false, => return true, - .one, - .bool_true, - => switch (ty.zigTypeTag(mod)) { + .one => switch (ty.zigTypeTag(mod)) { .Int => { const info = ty.intInfo(mod); return switch (info.signedness) { diff --git a/src/TypedValue.zig b/src/TypedValue.zig index dc556942c3d6..7302f42e570f 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -77,66 +77,14 @@ pub fn print( return writer.writeAll("(variable)"); while (true) switch (val.tag()) { - .u1_type => return writer.writeAll("u1"), - .u8_type => return writer.writeAll("u8"), - .i8_type => return writer.writeAll("i8"), - .u16_type => return writer.writeAll("u16"), - .i16_type => return writer.writeAll("i16"), - .u29_type => return writer.writeAll("u29"), - .u32_type => return writer.writeAll("u32"), - .i32_type => return writer.writeAll("i32"), - .u64_type => return writer.writeAll("u64"), - .i64_type => return writer.writeAll("i64"), - .u128_type => return writer.writeAll("u128"), - .i128_type => return writer.writeAll("i128"), - .isize_type => return writer.writeAll("isize"), - .usize_type => return writer.writeAll("usize"), - .c_char_type => return writer.writeAll("c_char"), - .c_short_type => return writer.writeAll("c_short"), - .c_ushort_type => return writer.writeAll("c_ushort"), - .c_int_type => return writer.writeAll("c_int"), - .c_uint_type => return writer.writeAll("c_uint"), - .c_long_type => return writer.writeAll("c_long"), - .c_ulong_type => return writer.writeAll("c_ulong"), - .c_longlong_type => return writer.writeAll("c_longlong"), - .c_ulonglong_type => return writer.writeAll("c_ulonglong"), - .c_longdouble_type => return writer.writeAll("c_longdouble"), - .f16_type => return writer.writeAll("f16"), - .f32_type => return writer.writeAll("f32"), - .f64_type => return writer.writeAll("f64"), - .f80_type => return writer.writeAll("f80"), - .f128_type => return writer.writeAll("f128"), - .anyopaque_type => return writer.writeAll("anyopaque"), - .bool_type => return writer.writeAll("bool"), - .void_type => return writer.writeAll("void"), - .type_type => return writer.writeAll("type"), - .anyerror_type => return writer.writeAll("anyerror"), - .comptime_int_type => return writer.writeAll("comptime_int"), - .comptime_float_type => return writer.writeAll("comptime_float"), - .noreturn_type => return writer.writeAll("noreturn"), - 
.null_type => return writer.writeAll("@Type(.Null)"), - .undefined_type => return writer.writeAll("@Type(.Undefined)"), .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"), - .anyframe_type => return writer.writeAll("anyframe"), .const_slice_u8_type => return writer.writeAll("[]const u8"), .const_slice_u8_sentinel_0_type => return writer.writeAll("[:0]const u8"), .anyerror_void_error_union_type => return writer.writeAll("anyerror!void"), - .enum_literal_type => return writer.writeAll("@Type(.EnumLiteral)"), .manyptr_u8_type => return writer.writeAll("[*]u8"), .manyptr_const_u8_type => return writer.writeAll("[*]const u8"), .manyptr_const_u8_sentinel_0_type => return writer.writeAll("[*:0]const u8"), - .atomic_order_type => return writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op_type => return writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention_type => return writer.writeAll("std.builtin.CallingConvention"), - .address_space_type => return writer.writeAll("std.builtin.AddressSpace"), - .float_mode_type => return writer.writeAll("std.builtin.FloatMode"), - .reduce_op_type => return writer.writeAll("std.builtin.ReduceOp"), - .modifier_type => return writer.writeAll("std.builtin.CallModifier"), - .prefetch_options_type => return writer.writeAll("std.builtin.PrefetchOptions"), - .export_options_type => return writer.writeAll("std.builtin.ExportOptions"), - .extern_options_type => return writer.writeAll("std.builtin.ExternOptions"), - .type_info_type => return writer.writeAll("std.builtin.Type"), .empty_struct_value, .aggregate => { if (level == 0) { @@ -221,11 +169,8 @@ pub fn print( .undef => return writer.writeAll("undefined"), .zero => return writer.writeAll("0"), .one => return writer.writeAll("1"), - .void_value => return writer.writeAll("{}"), .unreachable_value => return writer.writeAll("unreachable"), .the_only_possible_value => return writer.writeAll("0"), - .bool_true => return writer.writeAll("true"), - .bool_false => return writer.writeAll("false"), .ty => return val.castTag(.ty).?.data.print(writer, mod), .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), @@ -487,8 +432,6 @@ pub fn print( // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), - .generic_poison_type => return writer.writeAll("(generic poison type)"), - .generic_poison => return writer.writeAll("(generic poison)"), .runtime_value => return writer.writeAll("[runtime value]"), }; } diff --git a/src/Zir.zig b/src/Zir.zig index 1063377fc7de..7b708ab04e62 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2106,8 +2106,10 @@ pub const Inst = struct { type_info_type = @enumToInt(InternPool.Index.type_info_type), manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), + const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), anyerror_void_error_union_type = 
@enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), @@ -2115,6 +2117,7 @@ pub const Inst = struct { undef = @enumToInt(InternPool.Index.undef), zero = @enumToInt(InternPool.Index.zero), zero_usize = @enumToInt(InternPool.Index.zero_usize), + zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), diff --git a/src/codegen.zig b/src/codegen.zig index 295409781e90..a3ecf88d50de 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -494,7 +494,7 @@ pub fn generateSymbol( return Result.ok; }, .Bool => { - const x: u8 = @boolToInt(typed_value.val.toBool()); + const x: u8 = @boolToInt(typed_value.val.toBool(mod)); try code.append(x); return Result.ok; }, @@ -1213,7 +1213,7 @@ pub fn genTypedValue( } }, .Bool => { - return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) }); + return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool(mod)) }); }, .Optional => { if (typed_value.ty.isPtrLikeOptional(mod)) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2e5e45d54cbb..327ccb011900 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1191,7 +1191,7 @@ pub const DeclGen = struct { } }, .Bool => { - if (val.toBool()) { + if (val.toBool(mod)) { return writer.writeAll("true"); } else { return writer.writeAll("false"); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 232cd9d42f38..1a092dff6925 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2793,7 +2793,7 @@ pub const DeclGen = struct { if (std.debug.runtime_safety and false) check: { if (t.zigTypeTag(mod) == .Opaque) break :check; if (!t.hasRuntimeBits(mod)) break :check; - if (!llvm_ty.isSized().toBool()) break :check; + if (!llvm_ty.isSized().toBool(mod)) break :check; const zig_size = t.abiSize(mod); const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty); @@ -3272,7 +3272,7 @@ pub const DeclGen = struct { switch (tv.ty.zigTypeTag(mod)) { .Bool => { const llvm_type = try dg.lowerType(tv.ty); - return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); + return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); }, // TODO this duplicates code with Pointer but they should share the handling // of the tv.val.tag() and then Int should do extra constPtrToInt on top diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3a5f5d6f6aea..417a8035b53e 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -621,7 +621,7 @@ pub const DeclGen = struct { switch (ty.zigTypeTag(mod)) { .Int => try self.addInt(ty, val), .Float => try self.addFloat(ty, val), - .Bool => try self.addConstBool(val.toBool()), + .Bool => try self.addConstBool(val.toBool(mod)), .Array => switch (val.tag()) { .aggregate => { const elem_vals = val.castTag(.aggregate).?.data; @@ -989,8 +989,8 @@ pub const DeclGen = struct { } }, .Bool => switch (repr) { - .direct => return try self.spv.constBool(result_ty_ref, val.toBool()), - .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool())), + .direct => return try self.spv.constBool(result_ty_ref, val.toBool(mod)), + .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool(mod))), }, .Float => return switch (ty.floatBits(target)) { 16 => try 
self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16) } } }), diff --git a/src/type.zig b/src/type.zig index 7db7ad316bfe..7c1414a1ebea 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2054,22 +2054,22 @@ pub const Type = struct { pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { - .u1 => return Value.initTag(.u1_type), - .u8 => return Value.initTag(.u8_type), - .i8 => return Value.initTag(.i8_type), - .u16 => return Value.initTag(.u16_type), - .u29 => return Value.initTag(.u29_type), - .i16 => return Value.initTag(.i16_type), - .u32 => return Value.initTag(.u32_type), - .i32 => return Value.initTag(.i32_type), - .u64 => return Value.initTag(.u64_type), - .i64 => return Value.initTag(.i64_type), - .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), - .const_slice_u8 => return Value.initTag(.const_slice_u8_type), - .const_slice_u8_sentinel_0 => return Value.initTag(.const_slice_u8_sentinel_0_type), - .manyptr_u8 => return Value.initTag(.manyptr_u8_type), - .manyptr_const_u8 => return Value.initTag(.manyptr_const_u8_type), - .manyptr_const_u8_sentinel_0 => return Value.initTag(.manyptr_const_u8_sentinel_0_type), + .u1 => return Value{ .ip_index = .u1_type, .legacy = undefined }, + .u8 => return Value{ .ip_index = .u8_type, .legacy = undefined }, + .i8 => return Value{ .ip_index = .i8_type, .legacy = undefined }, + .u16 => return Value{ .ip_index = .u16_type, .legacy = undefined }, + .u29 => return Value{ .ip_index = .u29_type, .legacy = undefined }, + .i16 => return Value{ .ip_index = .i16_type, .legacy = undefined }, + .u32 => return Value{ .ip_index = .u32_type, .legacy = undefined }, + .i32 => return Value{ .ip_index = .i32_type, .legacy = undefined }, + .u64 => return Value{ .ip_index = .u64_type, .legacy = undefined }, + .i64 => return Value{ .ip_index = .i64_type, .legacy = undefined }, + .single_const_pointer_to_comptime_int => return Value{ .ip_index = .single_const_pointer_to_comptime_int_type, .legacy = undefined }, + .const_slice_u8 => return Value{ .ip_index = .const_slice_u8_type, .legacy = undefined }, + .const_slice_u8_sentinel_0 => return Value{ .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined }, + .manyptr_u8 => return Value{ .ip_index = .manyptr_u8_type, .legacy = undefined }, + .manyptr_const_u8 => return Value{ .ip_index = .manyptr_const_u8_type, .legacy = undefined }, + .manyptr_const_u8_sentinel_0 => return Value{ .ip_index = .manyptr_const_u8_sentinel_0_type, .legacy = undefined }, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, else => return Value.Tag.ty.create(allocator, self), diff --git a/src/value.zig b/src/value.zig index 2f9f395017bd..396aab2012f7 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,58 +33,6 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. 
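
With both wrappers sharing the `ip_index` field, `Type.toValue` and `Value.toType` become index-preserving conversions for everything already interned; only legacy-tagged data still needs an allocator or a payload cast. A toy sketch of the round trip (simplified types; the legacy path is asserted away rather than modeled):

    const std = @import("std");

    const Index = enum(u32) { none, u8_type, bool_type, _ };

    // Both wrappers carry the same interned index, so converting between
    // them is just copying that index.
    const Type = struct {
        ip_index: Index,

        fn toValue(ty: Type) Value {
            std.debug.assert(ty.ip_index != .none); // legacy path elided
            return .{ .ip_index = ty.ip_index };
        }
    };

    const Value = struct {
        ip_index: Index,

        fn toType(val: Value) Type {
            std.debug.assert(val.ip_index != .none); // legacy path elided
            return .{ .ip_index = val.ip_index };
        }
    };

    test "round trip preserves the index" {
        const ty = Type{ .ip_index = .u8_type };
        try std.testing.expect(ty.toValue().toType().ip_index == .u8_type);
    }
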
- u1_type, - u8_type, - i8_type, - u16_type, - i16_type, - u29_type, - u32_type, - i32_type, - u64_type, - i64_type, - u128_type, - i128_type, - usize_type, - isize_type, - c_char_type, - c_short_type, - c_ushort_type, - c_int_type, - c_uint_type, - c_long_type, - c_ulong_type, - c_longlong_type, - c_ulonglong_type, - c_longdouble_type, - f16_type, - f32_type, - f64_type, - f80_type, - f128_type, - anyopaque_type, - bool_type, - void_type, - type_type, - anyerror_type, - comptime_int_type, - comptime_float_type, - noreturn_type, - anyframe_type, - null_type, - undefined_type, - enum_literal_type, - atomic_order_type, - atomic_rmw_op_type, - calling_convention_type, - address_space_type, - float_mode_type, - reduce_op_type, - modifier_type, - prefetch_options_type, - export_options_type, - extern_options_type, - type_info_type, manyptr_u8_type, manyptr_const_u8_type, manyptr_const_u8_sentinel_0_type, @@ -92,19 +40,14 @@ pub const Value = struct { const_slice_u8_type, const_slice_u8_sentinel_0_type, anyerror_void_error_union_type, - generic_poison_type, undef, zero, one, - void_value, unreachable_value, /// The only possible value for a particular type, which is stored externally. the_only_possible_value, null_value, - bool_true, - bool_false, - generic_poison, empty_struct_value, empty_array, // See last_no_payload_tag below. @@ -197,78 +140,22 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .u1_type, - .u8_type, - .i8_type, - .u16_type, - .i16_type, - .u29_type, - .u32_type, - .i32_type, - .u64_type, - .i64_type, - .u128_type, - .i128_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .anyopaque_type, - .bool_type, - .void_type, - .type_type, - .anyerror_type, - .comptime_int_type, - .comptime_float_type, - .noreturn_type, - .null_type, - .undefined_type, .single_const_pointer_to_comptime_int_type, - .anyframe_type, .const_slice_u8_type, .const_slice_u8_sentinel_0_type, .anyerror_void_error_union_type, - .generic_poison_type, - .enum_literal_type, + .undef, .zero, .one, - .void_value, .unreachable_value, .the_only_possible_value, .empty_struct_value, .empty_array, .null_value, - .bool_true, - .bool_false, .manyptr_u8_type, .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, - .atomic_order_type, - .atomic_rmw_op_type, - .calling_convention_type, - .address_space_type, - .float_mode_type, - .reduce_op_type, - .modifier_type, - .prefetch_options_type, - .export_options_type, - .extern_options_type, - .type_info_type, - .generic_poison, => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), .int_big_positive, @@ -418,78 +305,22 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .u1_type, - .u8_type, - .i8_type, - .u16_type, - .i16_type, - .u29_type, - .u32_type, - .i32_type, - .u64_type, - .i64_type, - .u128_type, - .i128_type, - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .f16_type, - .f32_type, - .f64_type, - .f80_type, - .f128_type, - .anyopaque_type, - .bool_type, - .void_type, - .type_type, - .anyerror_type, - .comptime_int_type, - 
.comptime_float_type, - .noreturn_type, - .null_type, - .undefined_type, .single_const_pointer_to_comptime_int_type, - .anyframe_type, .const_slice_u8_type, .const_slice_u8_sentinel_0_type, .anyerror_void_error_union_type, - .generic_poison_type, - .enum_literal_type, + .undef, .zero, .one, - .void_value, .unreachable_value, .the_only_possible_value, .empty_array, .null_value, - .bool_true, - .bool_false, .empty_struct_value, .manyptr_u8_type, .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, - .atomic_order_type, - .atomic_rmw_op_type, - .calling_convention_type, - .address_space_type, - .float_mode_type, - .reduce_op_type, - .modifier_type, - .prefetch_options_type, - .export_options_type, - .extern_options_type, - .type_info_type, - .generic_poison, => unreachable, .ty, .lazy_align, .lazy_size => { @@ -722,67 +553,13 @@ pub const Value = struct { } var val = start_val; while (true) switch (val.tag()) { - .u1_type => return out_stream.writeAll("u1"), - .u8_type => return out_stream.writeAll("u8"), - .i8_type => return out_stream.writeAll("i8"), - .u16_type => return out_stream.writeAll("u16"), - .u29_type => return out_stream.writeAll("u29"), - .i16_type => return out_stream.writeAll("i16"), - .u32_type => return out_stream.writeAll("u32"), - .i32_type => return out_stream.writeAll("i32"), - .u64_type => return out_stream.writeAll("u64"), - .i64_type => return out_stream.writeAll("i64"), - .u128_type => return out_stream.writeAll("u128"), - .i128_type => return out_stream.writeAll("i128"), - .isize_type => return out_stream.writeAll("isize"), - .usize_type => return out_stream.writeAll("usize"), - .c_char_type => return out_stream.writeAll("c_char"), - .c_short_type => return out_stream.writeAll("c_short"), - .c_ushort_type => return out_stream.writeAll("c_ushort"), - .c_int_type => return out_stream.writeAll("c_int"), - .c_uint_type => return out_stream.writeAll("c_uint"), - .c_long_type => return out_stream.writeAll("c_long"), - .c_ulong_type => return out_stream.writeAll("c_ulong"), - .c_longlong_type => return out_stream.writeAll("c_longlong"), - .c_ulonglong_type => return out_stream.writeAll("c_ulonglong"), - .c_longdouble_type => return out_stream.writeAll("c_longdouble"), - .f16_type => return out_stream.writeAll("f16"), - .f32_type => return out_stream.writeAll("f32"), - .f64_type => return out_stream.writeAll("f64"), - .f80_type => return out_stream.writeAll("f80"), - .f128_type => return out_stream.writeAll("f128"), - .anyopaque_type => return out_stream.writeAll("anyopaque"), - .bool_type => return out_stream.writeAll("bool"), - .void_type => return out_stream.writeAll("void"), - .type_type => return out_stream.writeAll("type"), - .anyerror_type => return out_stream.writeAll("anyerror"), - .comptime_int_type => return out_stream.writeAll("comptime_int"), - .comptime_float_type => return out_stream.writeAll("comptime_float"), - .noreturn_type => return out_stream.writeAll("noreturn"), - .null_type => return out_stream.writeAll("@Type(.Null)"), - .undefined_type => return out_stream.writeAll("@Type(.Undefined)"), .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"), - .anyframe_type => return out_stream.writeAll("anyframe"), .const_slice_u8_type => return out_stream.writeAll("[]const u8"), .const_slice_u8_sentinel_0_type => return out_stream.writeAll("[:0]const u8"), .anyerror_void_error_union_type => return out_stream.writeAll("anyerror!void"), - .generic_poison_type => return out_stream.writeAll("(generic poison type)"), - 
.generic_poison => return out_stream.writeAll("(generic poison)"), - .enum_literal_type => return out_stream.writeAll("@Type(.EnumLiteral)"), .manyptr_u8_type => return out_stream.writeAll("[*]u8"), .manyptr_const_u8_type => return out_stream.writeAll("[*]const u8"), .manyptr_const_u8_sentinel_0_type => return out_stream.writeAll("[*:0]const u8"), - .atomic_order_type => return out_stream.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op_type => return out_stream.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention_type => return out_stream.writeAll("std.builtin.CallingConvention"), - .address_space_type => return out_stream.writeAll("std.builtin.AddressSpace"), - .float_mode_type => return out_stream.writeAll("std.builtin.FloatMode"), - .reduce_op_type => return out_stream.writeAll("std.builtin.ReduceOp"), - .modifier_type => return out_stream.writeAll("std.builtin.CallModifier"), - .prefetch_options_type => return out_stream.writeAll("std.builtin.PrefetchOptions"), - .export_options_type => return out_stream.writeAll("std.builtin.ExportOptions"), - .extern_options_type => return out_stream.writeAll("std.builtin.ExternOptions"), - .type_info_type => return out_stream.writeAll("std.builtin.Type"), .empty_struct_value => return out_stream.writeAll("struct {}{}"), .aggregate => { @@ -795,11 +572,8 @@ pub const Value = struct { .undef => return out_stream.writeAll("undefined"), .zero => return out_stream.writeAll("0"), .one => return out_stream.writeAll("1"), - .void_value => return out_stream.writeAll("{}"), .unreachable_value => return out_stream.writeAll("unreachable"), .the_only_possible_value => return out_stream.writeAll("(the only possible value)"), - .bool_true => return out_stream.writeAll("true"), - .bool_false => return out_stream.writeAll("false"), .ty => return val.castTag(.ty).?.data.dump("", options, out_stream), .lazy_align => { try out_stream.writeAll("@alignOf("); @@ -943,74 +717,16 @@ pub const Value = struct { /// Asserts that the value is representable as a type. 
pub fn toType(self: Value) Type { - if (self.ip_index != .none) { - return .{ - .ip_index = self.ip_index, - .legacy = undefined, - }; - } + if (self.ip_index != .none) return self.ip_index.toType(); return switch (self.tag()) { .ty => self.castTag(.ty).?.data, - .u1_type => Type.u1, - .u8_type => Type.u8, - .i8_type => Type.i8, - .u16_type => Type.u16, - .i16_type => Type.i16, - .u29_type => Type.u29, - .u32_type => Type.u32, - .i32_type => Type.i32, - .u64_type => Type.u64, - .i64_type => Type.i64, - .u128_type => Type.u128, - .i128_type => Type.i128, - .usize_type => Type.usize, - .isize_type => Type.isize, - .c_char_type => Type.c_char, - .c_short_type => Type.c_short, - .c_ushort_type => Type.c_ushort, - .c_int_type => Type.c_int, - .c_uint_type => Type.c_uint, - .c_long_type => Type.c_long, - .c_ulong_type => Type.c_ulong, - .c_longlong_type => Type.c_longlong, - .c_ulonglong_type => Type.c_ulonglong, - .c_longdouble_type => Type.c_longdouble, - .f16_type => Type.f16, - .f32_type => Type.f32, - .f64_type => Type.f64, - .f80_type => Type.f80, - .f128_type => Type.f128, - .anyopaque_type => Type.anyopaque, - .bool_type => Type.bool, - .void_type => Type.void, - .type_type => Type.type, - .anyerror_type => Type.anyerror, - .comptime_int_type => Type.comptime_int, - .comptime_float_type => Type.comptime_float, - .noreturn_type => Type.noreturn, - .null_type => Type.null, - .undefined_type => Type.undefined, .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), - .anyframe_type => Type.@"anyframe", .const_slice_u8_type => Type.initTag(.const_slice_u8), .const_slice_u8_sentinel_0_type => Type.initTag(.const_slice_u8_sentinel_0), .anyerror_void_error_union_type => Type.initTag(.anyerror_void_error_union), - .generic_poison_type => .{ .ip_index = .generic_poison_type, .legacy = undefined }, - .enum_literal_type => .{ .ip_index = .enum_literal_type, .legacy = undefined }, .manyptr_u8_type => Type.initTag(.manyptr_u8), .manyptr_const_u8_type => Type.initTag(.manyptr_const_u8), .manyptr_const_u8_sentinel_0_type => Type.initTag(.manyptr_const_u8_sentinel_0), - .atomic_order_type => .{ .ip_index = .atomic_order_type, .legacy = undefined }, - .atomic_rmw_op_type => .{ .ip_index = .atomic_rmw_op_type, .legacy = undefined }, - .calling_convention_type => .{ .ip_index = .calling_convention_type, .legacy = undefined }, - .address_space_type => .{ .ip_index = .address_space_type, .legacy = undefined }, - .float_mode_type => .{ .ip_index = .float_mode_type, .legacy = undefined }, - .reduce_op_type => .{ .ip_index = .reduce_op_type, .legacy = undefined }, - .modifier_type => .{ .ip_index = .call_modifier_type, .legacy = undefined }, - .prefetch_options_type => .{ .ip_index = .prefetch_options_type, .legacy = undefined }, - .export_options_type => .{ .ip_index = .export_options_type, .legacy = undefined }, - .extern_options_type => .{ .ip_index = .extern_options_type, .legacy = undefined }, - .type_info_type => .{ .ip_index = .type_info_type, .legacy = undefined }, else => unreachable, }; @@ -1133,58 +849,63 @@ pub const Value = struct { mod: *const Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { - switch (val.tag()) { - .null_value, - .zero, - .bool_false, - .the_only_possible_value, // i0, u0 - => return BigIntMutable.init(&space.limbs, 0).toConst(), + switch (val.ip_index) { + .bool_false => return BigIntMutable.init(&space.limbs, 0).toConst(), + .bool_true => return BigIntMutable.init(&space.limbs, 1).toConst(), + .none => switch (val.tag()) { 
+ .null_value, + .zero, + .the_only_possible_value, // i0, u0 + => return BigIntMutable.init(&space.limbs, 0).toConst(), - .one, - .bool_true, - => return BigIntMutable.init(&space.limbs, 1).toConst(), + .one => return BigIntMutable.init(&space.limbs, 1).toConst(), - .enum_field_index => { - const index = val.castTag(.enum_field_index).?.data; - return BigIntMutable.init(&space.limbs, index).toConst(); - }, - .runtime_value => { - const sub_val = val.castTag(.runtime_value).?.data; - return sub_val.toBigIntAdvanced(space, mod, opt_sema); - }, - .int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(), - .int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(), - .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(), - .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(), + .enum_field_index => { + const index = val.castTag(.enum_field_index).?.data; + return BigIntMutable.init(&space.limbs, index).toConst(); + }, + .runtime_value => { + const sub_val = val.castTag(.runtime_value).?.data; + return sub_val.toBigIntAdvanced(space, mod, opt_sema); + }, + .int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(), + .int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(), + .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(), + .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(), - .undef => unreachable, + .undef => unreachable, - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiAlignment(mod); - return BigIntMutable.init(&space.limbs, x).toConst(); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiSize(mod); - return BigIntMutable.init(&space.limbs, x).toConst(); - }, + .lazy_align => { + const ty = val.castTag(.lazy_align).?.data; + if (opt_sema) |sema| { + try sema.resolveTypeLayout(ty); + } + const x = ty.abiAlignment(mod); + return BigIntMutable.init(&space.limbs, x).toConst(); + }, + .lazy_size => { + const ty = val.castTag(.lazy_size).?.data; + if (opt_sema) |sema| { + try sema.resolveTypeLayout(ty); + } + const x = ty.abiSize(mod); + return BigIntMutable.init(&space.limbs, x).toConst(); + }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?; - const elem_size = elem_ptr.elem_ty.abiSize(mod); - const new_addr = array_addr + elem_size * elem_ptr.index; - return BigIntMutable.init(&space.limbs, new_addr).toConst(); - }, + .elem_ptr => { + const elem_ptr = val.castTag(.elem_ptr).?.data; + const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?; + const elem_size = elem_ptr.elem_ty.abiSize(mod); + const new_addr = array_addr + elem_size * elem_ptr.index; + return BigIntMutable.init(&space.limbs, new_addr).toConst(); + }, - else => unreachable, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| return int.big_int, + else => unreachable, + }, } } @@ -1197,41 +918,46 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
pub fn getUnsignedIntAdvanced(val: Value, mod: *const Module, opt_sema: ?*Sema) !?u64 { - switch (val.tag()) { - .zero, - .bool_false, - .the_only_possible_value, // i0, u0 - => return 0, + switch (val.ip_index) { + .bool_false => return 0, + .bool_true => return 1, + .none => switch (val.tag()) { + .zero, + .the_only_possible_value, // i0, u0 + => return 0, - .one, - .bool_true, - => return 1, + .one => return 1, - .int_u64 => return val.castTag(.int_u64).?.data, - .int_i64 => return @intCast(u64, val.castTag(.int_i64).?.data), - .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null, - .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null, + .int_u64 => return val.castTag(.int_u64).?.data, + .int_i64 => return @intCast(u64, val.castTag(.int_i64).?.data), + .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null, + .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null, - .undef => unreachable, + .undef => unreachable, - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; - } else { - return ty.abiAlignment(mod); - } + .lazy_align => { + const ty = val.castTag(.lazy_align).?.data; + if (opt_sema) |sema| { + return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; + } else { + return ty.abiAlignment(mod); + } + }, + .lazy_size => { + const ty = val.castTag(.lazy_size).?.data; + if (opt_sema) |sema| { + return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar; + } else { + return ty.abiSize(mod); + } + }, + + else => return null, }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar; - } else { - return ty.abiSize(mod); - } + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| return int.big_int.to(u64) catch null, + else => unreachable, }, - - else => return null, } } @@ -1242,51 +968,65 @@ pub const Value = struct { /// Asserts the value is an integer and it fits in a i64 pub fn toSignedInt(val: Value, mod: *const Module) i64 { - switch (val.tag()) { - .zero, - .bool_false, - .the_only_possible_value, // i0, u0 - => return 0, + switch (val.ip_index) { + .bool_false => return 0, + .bool_true => return 1, + .none => switch (val.tag()) { + .zero, + .the_only_possible_value, // i0, u0 + => return 0, - .one, - .bool_true, - => return 1, + .one => return 1, - .int_u64 => return @intCast(i64, val.castTag(.int_u64).?.data), - .int_i64 => return val.castTag(.int_i64).?.data, - .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable, - .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable, + .int_u64 => return @intCast(i64, val.castTag(.int_u64).?.data), + .int_i64 => return val.castTag(.int_i64).?.data, + .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable, + .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable, - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return @intCast(i64, ty.abiAlignment(mod)); + .lazy_align => { + const ty = val.castTag(.lazy_align).?.data; + return @intCast(i64, ty.abiAlignment(mod)); + }, + .lazy_size => { + const ty = val.castTag(.lazy_size).?.data; + return @intCast(i64, ty.abiSize(mod)); 
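
getUnsignedIntAdvanced, toSignedInt, and the other integer queries reworked here share a third arm: an index that is neither `.none` nor one of the special bool cases is decoded through `mod.intern_pool.indexToKey`, whose `.int` key carries a big-int constant. A sketch of just that fallback, with a toy Key standing in for the real one in src/InternPool.zig:

    const std = @import("std");
    const BigIntConst = std.math.big.int.Const;

    // Toy key: only the `.int` case the integer queries care about.
    const Key = union(enum) { int: struct { big_int: BigIntConst } };

    // Mirrors the fallback arm: convert if the value fits in a u64,
    // otherwise report null, as the patch does with `to(u64) catch null`.
    fn getUnsignedInt(key: Key) ?u64 {
        return switch (key) {
            .int => |int| int.big_int.to(u64) catch null,
        };
    }

    test "pool-resident integer to u64" {
        const limbs = [_]std.math.big.Limb{42};
        const key = Key{ .int = .{ .big_int = .{ .limbs = &limbs, .positive = true } } };
        try std.testing.expectEqual(@as(?u64, 42), getUnsignedInt(key));
    }
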
+ }, + + .undef => unreachable, + else => unreachable, }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return @intCast(i64, ty.abiSize(mod)); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| return int.big_int.to(i64) catch unreachable, + else => unreachable, }, - - .undef => unreachable, - else => unreachable, } } - pub fn toBool(self: Value) bool { - return switch (self.tag()) { - .bool_true, .one => true, - .bool_false, .zero => false, - .int_u64 => switch (self.castTag(.int_u64).?.data) { - 0 => false, - 1 => true, + pub fn toBool(val: Value, mod: *const Module) bool { + switch (val.ip_index) { + .bool_true => return true, + .bool_false => return false, + .none => return switch (val.tag()) { + .one => true, + .zero => false, + + .int_u64 => switch (val.castTag(.int_u64).?.data) { + 0 => false, + 1 => true, + else => unreachable, + }, + .int_i64 => switch (val.castTag(.int_i64).?.data) { + 0 => false, + 1 => true, + else => unreachable, + }, else => unreachable, }, - .int_i64 => switch (self.castTag(.int_i64).?.data) { - 0 => false, - 1 => true, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| return !int.big_int.eqZero(), else => unreachable, }, - else => unreachable, - }; + } } fn isDeclRef(val: Value) bool { @@ -1319,7 +1059,7 @@ pub const Value = struct { switch (ty.zigTypeTag(mod)) { .Void => {}, .Bool => { - buffer[0] = @boolToInt(val.toBool()); + buffer[0] = @boolToInt(val.toBool(mod)); }, .Int, .Enum => { const int_info = ty.intInfo(mod); @@ -1442,7 +1182,7 @@ pub const Value = struct { .Little => bit_offset / 8, .Big => buffer.len - bit_offset / 8 - 1, }; - if (val.toBool()) { + if (val.toBool(mod)) { buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8)); } else { buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8)); @@ -1802,90 +1542,117 @@ pub const Value = struct { pub fn clz(val: Value, ty: Type, mod: *const Module) u64 { const ty_bits = ty.intInfo(mod).bits; - switch (val.tag()) { - .zero, .bool_false => return ty_bits, - .one, .bool_true => return ty_bits - 1, + switch (val.ip_index) { + .bool_false => return ty_bits, + .bool_true => return ty_bits - 1, + .none => switch (val.tag()) { + .zero => return ty_bits, + .one => return ty_bits - 1, + + .int_u64 => { + const big = @clz(val.castTag(.int_u64).?.data); + return big + ty_bits - 64; + }, + .int_i64 => { + @panic("TODO implement i64 Value clz"); + }, + .int_big_positive => { + const bigint = val.castTag(.int_big_positive).?.asBigInt(); + return bigint.clz(ty_bits); + }, + .int_big_negative => { + @panic("TODO implement int_big_negative Value clz"); + }, - .int_u64 => { - const big = @clz(val.castTag(.int_u64).?.data); - return big + ty_bits - 64; - }, - .int_i64 => { - @panic("TODO implement i64 Value clz"); - }, - .int_big_positive => { - const bigint = val.castTag(.int_big_positive).?.asBigInt(); - return bigint.clz(ty_bits); - }, - .int_big_negative => { - @panic("TODO implement int_big_negative Value clz"); - }, + .the_only_possible_value => { + assert(ty_bits == 0); + return ty_bits; + }, - .the_only_possible_value => { - assert(ty_bits == 0); - return ty_bits; - }, + .lazy_align, .lazy_size => { + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; + return bigint.clz(ty_bits); + }, - .lazy_align, .lazy_size => { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; - 
return bigint.clz(ty_bits); + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| return int.big_int.clz(ty_bits), + else => unreachable, }, - - else => unreachable, } } pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 { const ty_bits = ty.intInfo(mod).bits; - switch (val.tag()) { - .zero, .bool_false => return ty_bits, - .one, .bool_true => return 0, + switch (val.ip_index) { + .bool_false => return ty_bits, + .bool_true => return 0, + .none => switch (val.tag()) { + .zero => return ty_bits, + .one => return 0, + + .int_u64 => { + const big = @ctz(val.castTag(.int_u64).?.data); + return if (big == 64) ty_bits else big; + }, + .int_i64 => { + @panic("TODO implement i64 Value ctz"); + }, + .int_big_positive => { + const bigint = val.castTag(.int_big_positive).?.asBigInt(); + return bigint.ctz(); + }, + .int_big_negative => { + @panic("TODO implement int_big_negative Value ctz"); + }, - .int_u64 => { - const big = @ctz(val.castTag(.int_u64).?.data); - return if (big == 64) ty_bits else big; - }, - .int_i64 => { - @panic("TODO implement i64 Value ctz"); - }, - .int_big_positive => { - const bigint = val.castTag(.int_big_positive).?.asBigInt(); - return bigint.ctz(); - }, - .int_big_negative => { - @panic("TODO implement int_big_negative Value ctz"); - }, + .the_only_possible_value => { + assert(ty_bits == 0); + return ty_bits; + }, - .the_only_possible_value => { - assert(ty_bits == 0); - return ty_bits; - }, + .lazy_align, .lazy_size => { + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; + return bigint.ctz(); + }, - .lazy_align, .lazy_size => { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; - return bigint.ctz(); + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| return int.big_int.ctz(), + else => unreachable, }, - - else => unreachable, } } pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 { assert(!val.isUndef()); - switch (val.tag()) { - .zero, .bool_false => return 0, - .one, .bool_true => return 1, + switch (val.ip_index) { + .bool_false => return 0, + .bool_true => return 1, + .none => switch (val.tag()) { + .zero => return 0, + .one => return 1, - .int_u64 => return @popCount(val.castTag(.int_u64).?.data), + .int_u64 => return @popCount(val.castTag(.int_u64).?.data), - else => { - const info = ty.intInfo(mod); + else => { + const info = ty.intInfo(mod); - var buffer: Value.BigIntSpace = undefined; - const int = val.toBigInt(&buffer, mod); - return @intCast(u64, int.popCount(info.bits)); + var buffer: Value.BigIntSpace = undefined; + const int = val.toBigInt(&buffer, mod); + return @intCast(u64, int.popCount(info.bits)); + }, + }, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| { + const info = ty.intInfo(mod); + return int.big_int.popCount(info.bits); + }, + else => unreachable, }, } } @@ -1933,37 +1700,42 @@ pub const Value = struct { /// Returns the number of bits the value requires to represent stored in twos complement form. 
pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize { const target = mod.getTarget(); - switch (self.tag()) { - .zero, - .bool_false, - .the_only_possible_value, - => return 0, - - .one, - .bool_true, - => return 1, + switch (self.ip_index) { + .bool_false => return 0, + .bool_true => return 1, + .none => switch (self.tag()) { + .zero, + .the_only_possible_value, + => return 0, - .int_u64 => { - const x = self.castTag(.int_u64).?.data; - if (x == 0) return 0; - return @intCast(usize, std.math.log2(x) + 1); - }, - .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(), - .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(), + .one => return 1, - .decl_ref_mut, - .comptime_field_ptr, - .extern_fn, - .decl_ref, - .function, - .variable, - .eu_payload_ptr, - .opt_payload_ptr, - => return target.ptrBitWidth(), + .int_u64 => { + const x = self.castTag(.int_u64).?.data; + if (x == 0) return 0; + return @intCast(usize, std.math.log2(x) + 1); + }, + .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(), + .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(), + + .decl_ref_mut, + .comptime_field_ptr, + .extern_fn, + .decl_ref, + .function, + .variable, + .eu_payload_ptr, + .opt_payload_ptr, + => return target.ptrBitWidth(), - else => { - var buffer: BigIntSpace = undefined; - return self.toBigInt(&buffer, mod).bitCountTwosComp(); + else => { + var buffer: BigIntSpace = undefined; + return self.toBigInt(&buffer, mod).bitCountTwosComp(); + }, + }, + else => switch (mod.intern_pool.indexToKey(self.ip_index)) { + .int => |int| return int.big_int.bitCountTwosComp(), + else => unreachable, }, } } @@ -2008,82 +1780,88 @@ pub const Value = struct { mod: *const Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { - return switch (lhs.tag()) { - .zero, - .bool_false, - .the_only_possible_value, - => .eq, + switch (lhs.ip_index) { + .bool_false => return .eq, + .bool_true => return .gt, + .none => return switch (lhs.tag()) { + .zero, + .the_only_possible_value, + => .eq, - .one, - .bool_true, - .decl_ref, - .decl_ref_mut, - .comptime_field_ptr, - .extern_fn, - .function, - .variable, - => .gt, + .one, + .decl_ref, + .decl_ref_mut, + .comptime_field_ptr, + .extern_fn, + .function, + .variable, + => .gt, + + .enum_field_index => return std.math.order(lhs.castTag(.enum_field_index).?.data, 0), + .runtime_value => { + // This is needed to correctly handle hashing the value. + // Checks in Sema should prevent direct comparisons from reaching here. 
+ const val = lhs.castTag(.runtime_value).?.data; + return val.orderAgainstZeroAdvanced(mod, opt_sema); + }, + .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0), + .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0), + .int_big_positive => lhs.castTag(.int_big_positive).?.asBigInt().orderAgainstScalar(0), + .int_big_negative => lhs.castTag(.int_big_negative).?.asBigInt().orderAgainstScalar(0), + + .lazy_align => { + const ty = lhs.castTag(.lazy_align).?.data; + const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => unreachable, + else => |e| return e, + }) { + return .gt; + } else { + return .eq; + } + }, + .lazy_size => { + const ty = lhs.castTag(.lazy_size).?.data; + const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => unreachable, + else => |e| return e, + }) { + return .gt; + } else { + return .eq; + } + }, - .enum_field_index => return std.math.order(lhs.castTag(.enum_field_index).?.data, 0), - .runtime_value => { - // This is needed to correctly handle hashing the value. - // Checks in Sema should prevent direct comparisons from reaching here. - const val = lhs.castTag(.runtime_value).?.data; - return val.orderAgainstZeroAdvanced(mod, opt_sema); - }, - .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0), - .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0), - .int_big_positive => lhs.castTag(.int_big_positive).?.asBigInt().orderAgainstScalar(0), - .int_big_negative => lhs.castTag(.int_big_negative).?.asBigInt().orderAgainstScalar(0), + .float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0), + .float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0), + .float_64 => std.math.order(lhs.castTag(.float_64).?.data, 0), + .float_80 => std.math.order(lhs.castTag(.float_80).?.data, 0), + .float_128 => std.math.order(lhs.castTag(.float_128).?.data, 0), + + .elem_ptr => { + const elem_ptr = lhs.castTag(.elem_ptr).?.data; + switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) { + .lt => unreachable, + .gt => return .gt, + .eq => { + if (elem_ptr.index == 0) { + return .eq; + } else { + return .gt; + } + }, + } + }, - .lazy_align => { - const ty = lhs.castTag(.lazy_align).?.data; - const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) { - return .gt; - } else { - return .eq; - } - }, - .lazy_size => { - const ty = lhs.castTag(.lazy_size).?.data; - const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) { - return .gt; - } else { - return .eq; - } + else => unreachable, }, - - .float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0), - .float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0), - .float_64 => std.math.order(lhs.castTag(.float_64).?.data, 0), - .float_80 => std.math.order(lhs.castTag(.float_80).?.data, 0), - .float_128 => std.math.order(lhs.castTag(.float_128).?.data, 0), - - .elem_ptr => { - const elem_ptr = lhs.castTag(.elem_ptr).?.data; - switch (try 
elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) { - .lt => unreachable, - .gt => return .gt, - .eq => { - if (elem_ptr.index == 0) { - return .eq; - } else { - return .gt; - } - }, - } + else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + .int => |int| return int.big_int.orderAgainstScalar(0), + else => unreachable, }, - - else => unreachable, - }; + } } /// Asserts the value is comparable. @@ -2293,12 +2071,14 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!bool { + if (a.ip_index != .none or b.ip_index != .none) return a.ip_index == b.ip_index; + const target = mod.getTarget(); const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) switch (a_tag) { .undef => return true, - .void_value, .null_value, .the_only_possible_value, .empty_struct_value => return true, + .null_value, .the_only_possible_value, .empty_struct_value => return true, .enum_literal => { const a_name = a.castTag(.enum_literal).?.data; const b_name = b.castTag(.enum_literal).?.data; @@ -2574,6 +2354,13 @@ pub const Value = struct { /// This function is used by hash maps and so treats floating-point NaNs as equal /// to each other, and not equal to other floating-point values. pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { + if (val.ip_index != .none) { + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. + std.hash.autoHash(hasher, val.ip_index); + return; + } const zig_ty_tag = ty.zigTypeTag(mod); std.hash.autoHash(hasher, zig_ty_tag); if (val.isUndef()) return; @@ -2908,8 +2695,6 @@ pub const Value = struct { .int_i64, .int_big_positive, .int_big_negative, - .bool_false, - .bool_true, .the_only_possible_value, .lazy_align, .lazy_size, @@ -3051,15 +2836,6 @@ pub const Value = struct { .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), - // These values will implicitly be treated as `repeated`. - .zero, - .one, - .bool_false, - .bool_true, - .int_i64, - .int_u64, - => return val, - else => unreachable, } } @@ -3272,13 +3048,10 @@ pub const Value = struct { // in which case the value 0 is null and other values are non-null. .zero, - .bool_false, .the_only_possible_value, => true, - .one, - .bool_true, - => false, + .one => false, .int_u64, .int_i64, @@ -5419,11 +5192,7 @@ pub const Value = struct { } pub fn isGenericPoison(val: Value) bool { - return switch (val.ip_index) { - .generic_poison => true, - .none => val.tag() == .generic_poison, - else => false, - }; + return val.ip_index == .generic_poison; } /// This type is not copyable since it may contain pointers to its inner data. 
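
Every Value query rewritten above follows the same three-way dispatch, which is the
heart of this transition: check `ip_index` first, answering the well-known interned
indices (`bool_false`, `bool_true`, ...) inline; treat `.none` as "still a legacy
tagged value" and fall through to the old `tag()` switch; and decode any other index
through `mod.intern_pool.indexToKey`. A minimal, self-contained sketch of that shape,
using illustrative stand-in types rather than the compiler's real `Value` and
`InternPool`:

const std = @import("std");

// Stand-ins for InternPool.Index and the legacy value tags (illustrative only).
const Index = enum(u32) {
    bool_false,
    bool_true,
    none = std.math.maxInt(u32),
    _,
};

const LegacyTag = enum { zero, one, int_u64 };

const Value = struct {
    ip_index: Index,
    legacy_tag: LegacyTag,

    fn toBoolSketch(val: Value) bool {
        switch (val.ip_index) {
            // 1. Well-known interned indices are answered directly.
            .bool_true => return true,
            .bool_false => return false,
            // 2. `.none` means the value still uses the legacy tagged
            //    representation, so the old tag dispatch applies.
            .none => return switch (val.legacy_tag) {
                .one => true,
                .zero => false,
                else => unreachable,
            },
            // 3. Any other index names an interned value; the real code
            //    decodes it via `mod.intern_pool.indexToKey(val.ip_index)`.
            //    This sketch has no pool to consult, so it stops here.
            _ => unreachable,
        }
    }
};

test "ip_index is consulted before the legacy tag" {
    const t: Value = .{ .ip_index = .bool_true, .legacy_tag = .zero };
    try std.testing.expect(t.toBoolSketch());
}

Reaching the third arm requires access to the pool, which is why nearly every
method in this file gains a `mod: *const Module` parameter in these hunks.
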
@@ -5673,10 +5442,13 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &negative_one_payload.base }, }; pub const undef = initTag(.undef); - pub const @"void" = initTag(.void_value); + pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; pub const @"null" = initTag(.null_value); - pub const @"false" = initTag(.bool_false); - pub const @"true" = initTag(.bool_true); + pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined }; + pub const @"true": Value = .{ .ip_index = .bool_true, .legacy = undefined }; + + pub const generic_poison: Value = .{ .ip_index = .generic_poison, .legacy = undefined }; + pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined }; pub fn makeBool(x: bool) Value { return if (x) Value.true else Value.false; From fb16ad3add77eff23f9c5dbaf802fdecfb4c8cc0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 19:20:23 -0700 Subject: [PATCH 016/205] Type: update to use InternPool for some methods --- src/type.zig | 40 ++++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/src/type.zig b/src/type.zig index 7c1414a1ebea..0eff51251d1f 100644 --- a/src/type.zig +++ b/src/type.zig @@ -5921,32 +5921,44 @@ pub const Type = struct { }; pub fn isTuple(ty: Type) bool { - return switch (ty.tag()) { - .tuple, .empty_struct_literal => true, - .@"struct" => ty.castTag(.@"struct").?.data.is_tuple, - else => false, + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple, .empty_struct_literal => true, + .@"struct" => ty.castTag(.@"struct").?.data.is_tuple, + else => false, + }, + else => false, // TODO }; } pub fn isAnonStruct(ty: Type) bool { - return switch (ty.tag()) { - .anon_struct, .empty_struct_literal => true, - else => false, + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .anon_struct, .empty_struct_literal => true, + else => false, + }, + else => false, // TODO }; } pub fn isTupleOrAnonStruct(ty: Type) bool { - return switch (ty.tag()) { - .tuple, .empty_struct_literal, .anon_struct => true, - .@"struct" => ty.castTag(.@"struct").?.data.is_tuple, - else => false, + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple, .empty_struct_literal, .anon_struct => true, + .@"struct" => ty.castTag(.@"struct").?.data.is_tuple, + else => false, + }, + else => false, // TODO }; } pub fn isSimpleTuple(ty: Type) bool { - return switch (ty.tag()) { - .tuple, .empty_struct_literal => true, - else => false, + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple, .empty_struct_literal => true, + else => false, + }, + else => false, // TODO }; } From 85c69c51945d7fb5d4cd2dea03fdb7915ecc55fa Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 20:04:47 -0700 Subject: [PATCH 017/205] Type.isSlice: make it InternPool aware --- src/InternPool.zig | 50 ++++- src/Sema.zig | 56 ++--- src/TypedValue.zig | 2 +- src/arch/aarch64/abi.zig | 2 +- src/arch/arm/abi.zig | 2 +- src/arch/riscv64/abi.zig | 2 +- src/arch/wasm/CodeGen.zig | 20 +- src/arch/wasm/abi.zig | 2 +- src/arch/x86_64/CodeGen.zig | 4 +- src/codegen.zig | 10 +- src/codegen/c.zig | 16 +- src/codegen/llvm.zig | 15 +- src/codegen/spirv.zig | 4 +- src/link/Dwarf.zig | 2 +- src/type.zig | 433 ++++++++++++++++++------------------ src/value.zig | 8 +- 16 files changed, 341 insertions(+), 287 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 6345d36f261e..3ecc18c4266e 100644 --- a/src/InternPool.zig 
+++ b/src/InternPool.zig @@ -629,7 +629,7 @@ pub const Tag = enum(u8) { /// A vector type. /// data is payload to Vector. type_vector, - /// A pointer type along with all its bells and whistles. + /// A fully explicitly specified pointer type. /// data is payload to Pointer. type_pointer, /// An optional type. @@ -682,13 +682,13 @@ pub const Tag = enum(u8) { /// An enum tag identified by a negative integer value. /// data is a limbs index to Int. enum_tag_negative, - /// A float value that can be represented by f32. + /// An f32 value. /// data is float value bitcasted to u32. float_f32, - /// A float value that can be represented by f64. + /// An f64 value. /// data is payload index to Float64. float_f64, - /// A float value that can be represented by f128. + /// An f128 value. /// data is payload index to Float128. float_f128, /// An extern function. @@ -871,7 +871,47 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .simple_type => .{ .simple_type = @intToEnum(SimpleType, data) }, .simple_value => .{ .simple_value = @intToEnum(SimpleValue, data) }, - else => @panic("TODO"), + .type_vector => { + const vector_info = ip.extraData(Vector, data); + return .{ .vector_type = .{ + .len = vector_info.len, + .child = vector_info.child, + } }; + }, + + .type_pointer => { + const ptr_info = ip.extraData(Pointer, data); + return .{ .ptr_type = .{ + .elem_type = ptr_info.child, + .sentinel = ptr_info.sentinel, + .alignment = ptr_info.flags.alignment, + .size = ptr_info.flags.size, + .is_const = ptr_info.flags.is_const, + .is_volatile = ptr_info.flags.is_volatile, + .is_allowzero = ptr_info.flags.is_allowzero, + .address_space = ptr_info.flags.address_space, + } }; + }, + + .type_optional => .{ .optional_type = .{ .payload_type = @intToEnum(Index, data) } }, + + .type_error_union => @panic("TODO"), + .type_enum_simple => @panic("TODO"), + .simple_internal => @panic("TODO"), + .int_small_u32 => @panic("TODO"), + .int_small_i32 => @panic("TODO"), + .int_small_usize => @panic("TODO"), + .int_small_comptime_unsigned => @panic("TODO"), + .int_small_comptime_signed => @panic("TODO"), + .int_positive => @panic("TODO"), + .int_negative => @panic("TODO"), + .enum_tag_positive => @panic("TODO"), + .enum_tag_negative => @panic("TODO"), + .float_f32 => @panic("TODO"), + .float_f64 => @panic("TODO"), + .float_f128 => @panic("TODO"), + .extern_func => @panic("TODO"), + .func => @panic("TODO"), }; } diff --git a/src/Sema.zig b/src/Sema.zig index 65475104aae0..0eecda5d1666 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2030,7 +2030,7 @@ fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - if (ty.isSlice()) { + if (ty.isSlice(mod)) { try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)}); } break :msg msg; @@ -10359,7 +10359,7 @@ fn zirSwitchCond( .ErrorSet, .Enum, => { - if (operand_ty.isSlice()) { + if (operand_ty.isSlice(mod)) { return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}); } if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| { @@ -12017,7 +12017,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ty = try sema.resolveTypeFields(unresolved_ty); const has_field = hf: { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { if (mem.eql(u8, field_name, "ptr")) break :hf true; if (mem.eql(u8, field_name, "len")) break :hf true; break :hf false; @@ -20020,8 +20020,8 @@ fn 
zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithOwnedErrorMsg(msg); } - const dest_is_slice = dest_ty.isSlice(); - const operand_is_slice = operand_ty.isSlice(); + const dest_is_slice = dest_ty.isSlice(mod); + const operand_is_slice = operand_ty.isSlice(mod); if (dest_is_slice and !operand_is_slice) { return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{}); } @@ -20274,14 +20274,14 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A Type.usize, Value.initPayload(&val_payload.base), ); - const actual_ptr = if (ptr_ty.isSlice()) + const actual_ptr = if (ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty) else ptr; const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr); const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); - const ok = if (ptr_ty.isSlice()) ok: { + const ok = if (ptr_ty.isSlice(mod)) ok: { const len = try sema.analyzeSliceLen(block, ptr_src, ptr); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bit_or, len_zero, is_aligned); @@ -22336,7 +22336,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // Change the src from slice to a many pointer, to avoid multiple ptr // slice extractions in AIR instructions. const new_src_ptr_ty = sema.typeOf(new_src_ptr); - if (new_src_ptr_ty.isSlice()) { + if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } else if (dest_len == .none and len_val == null) { @@ -22344,7 +22344,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr); new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, .unneeded, dest_src, dest_src, dest_src, false); const new_src_ptr_ty = sema.typeOf(new_src_ptr); - if (new_src_ptr_ty.isSlice()) { + if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } @@ -22363,7 +22363,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // Extract raw pointer from dest slice. The AIR instructions could support them, but // it would cause redundant machine code instructions. 
const new_dest_ptr_ty = sema.typeOf(new_dest_ptr); - const raw_dest_ptr = if (new_dest_ptr_ty.isSlice()) + const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty) else new_dest_ptr; @@ -23383,7 +23383,7 @@ fn validateExternType( .Float, .AnyFrame, => return true, - .Pointer => return !(ty.isSlice() or try sema.typeRequiresComptime(ty)), + .Pointer => return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty)), .Int => switch (ty.intInfo(mod).bits) { 8, 16, 32, 64, 128 => return true, else => return false, @@ -23448,7 +23448,7 @@ fn explainWhyTypeIsNotExtern( => return, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); } else { const pointee_ty = ty.childType(); @@ -23523,7 +23523,7 @@ fn validatePackedType(ty: Type, mod: *const Module) bool { .Vector, .Enum, => return true, - .Pointer => return !ty.isSlice(), + .Pointer => return !ty.isSlice(mod), .Struct, .Union => return ty.containerLayout() == .Packed, } } @@ -23803,7 +23803,7 @@ fn panicSentinelMismatch( const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val); const ptr_ty = sema.typeOf(ptr); - const actual_sentinel = if (ptr_ty.isSlice()) + const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null); @@ -24064,7 +24064,7 @@ fn fieldVal( const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - if (child_type.isSlice()) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); + if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); break :msg msg; }; @@ -24140,7 +24140,7 @@ fn fieldPtr( ); } }, - .Pointer => if (inner_ty.isSlice()) { + .Pointer => if (inner_ty.isSlice(mod)) { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else @@ -25743,8 +25743,8 @@ fn coerceExtra( } }; break :pointer; } - if (dest_ty.isSlice()) break :to_anyopaque; - if (inst_ty.isSlice()) { + if (dest_ty.isSlice(mod)) break :to_anyopaque; + if (inst_ty.isSlice(mod)) { in_memory_result = .{ .slice_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25885,7 +25885,7 @@ fn coerceExtra( return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src); }, .Many => p: { - if (!inst_ty.isSlice()) break :p; + if (!inst_ty.isSlice(mod)) break :p; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p; const inst_info = inst_ty.ptrInfo().data; @@ -26651,7 +26651,7 @@ fn coerceInMemoryAllowed( } // Slices - if (dest_ty.isSlice() and src_ty.isSlice()) { + if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) { return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } @@ -27744,7 +27744,7 @@ fn beginComptimePtrMutation( ); }, .Pointer => { - assert(parent.ty.isSlice()); + assert(parent.ty.isSlice(mod)); val_ptr.* = try Value.Tag.slice.create(arena, .{ .ptr = Value.undef, .len = Value.undef, @@ -28187,7 +28187,7 @@ fn beginComptimePtrLoad( break :blk deref; } - if (field_ptr.container_ty.isSlice()) { + if 
(field_ptr.container_ty.isSlice(mod)) { const slice_val = tv.val.castTag(.slice).?.data; deref.pointee = switch (field_index) { Value.Payload.Slice.ptr_index => TypedValue{ @@ -28442,13 +28442,13 @@ fn coerceCompatiblePtrs( if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { - const actual_ptr = if (inst_ty.isSlice()) + const actual_ptr = if (inst_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) else inst; const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); - const ok = if (inst_ty.isSlice()) ok: { + const ok = if (inst_ty.isSlice(mod)) ok: { const len = try sema.analyzeSliceLen(block, inst_src, inst); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero); @@ -29548,7 +29548,7 @@ fn analyzeSlice( else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}), } - const ptr = if (slice_ty.isSlice()) + const ptr = if (slice_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty) else ptr_or_slice; @@ -29605,7 +29605,7 @@ fn analyzeSlice( } break :e try sema.addConstant(Type.usize, len_val); - } else if (slice_ty.isSlice()) { + } else if (slice_ty.isSlice(mod)) { if (!end_is_len) { const end = if (by_length) end: { const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); @@ -29778,7 +29778,7 @@ fn analyzeSlice( try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } - if (slice_ty.isSlice()) { + if (slice_ty.isSlice(mod)) { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); const actual_len = if (slice_ty.sentinel() == null) slice_len_inst @@ -29840,7 +29840,7 @@ fn analyzeSlice( // requirement: end <= len const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) - else if (slice_ty.isSlice()) blk: { + else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the // underlying value data includes the sentinel diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 7302f42e570f..877a8f5f4cb8 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -259,7 +259,7 @@ pub fn print( } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.isSlice()) { + } else if (field_ptr.container_ty.isSlice(mod)) { switch (field_ptr.field_index) { Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), Value.Payload.Slice.len_index => return writer.writeAll(".len"), diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index cbfd6a11717c..821afd27aef3 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -52,7 +52,7 @@ pub fn classifyType(ty: Type, mod: *const Module) Class { return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index ca7fff7d0887..eee4b41eefab 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -94,7 +94,7 @@ pub fn classifyType(ty: Type, 
mod: *const Module, ctx: Context) Class { return .byval; }, .Pointer => { - assert(!ty.isSlice()); + assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index c9e0873bcecf..ac0d8d3e32d1 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -52,7 +52,7 @@ pub fn classifyType(ty: Type, mod: *const Module) Class { return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2c1e8aa36db9..bb3f1f769db7 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1773,7 +1773,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { }, .Pointer => { // Slices act like struct and will be passed by reference - if (ty.isSlice()) return true; + if (ty.isSlice(mod)) return true; return false; }, } @@ -2396,7 +2396,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE }, }, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { // store pointer first // lower it to the stack so we do not have to store rhs into a local first try func.emitWValue(lhs); @@ -3010,11 +3010,11 @@ fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.In } fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - if (tv.ty.isSlice()) { + const mod = func.bin_file.base.options.module.?; + if (tv.ty.isSlice(mod)) { return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) }; } - const mod = func.bin_file.base.options.module.?; const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) { return WValue{ .imm32 = 0xaaaaaaaa }; @@ -4182,7 +4182,7 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod }; try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } - } else if (payload_ty.isSlice()) { + } else if (payload_ty.isSlice(mod)) { switch (func.arch()) { .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }), .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }), @@ -4455,10 +4455,11 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const ptr_ty = func.typeOf(un_op); - const result = if (ptr_ty.isSlice()) + const result = if (ptr_ty.isSlice(mod)) try func.slicePtr(operand) else switch (operand) { // for stack offset, return a pointer to this offset. 
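
Two patterns recur in the backend hunks above. Call sites that previously had no
Module in scope fetch one through `func.bin_file.base.options.module.?` (or
`f.object.dg.module` / `fg.dg.module` in the C and LLVM backends below) purely so
the type query can reach `mod.intern_pool`. And the wasm lowering treats a slice as
a two-word aggregate, pointer first and length second, which is also why the ABI
classifiers assert `!ty.isSlice(mod)` for byval pointers. A small illustration of
that layout assumption, using an ordinary extern struct as a stand-in (the language
itself guarantees no stable slice representation, as the extern-type diagnostic
earlier in this patch points out):

const std = @import("std");

// Illustrative model of the slice lowering the backends above rely on:
// a pointer word followed by a length word. Not compiler code.
fn SliceRepr(comptime Elem: type) type {
    return extern struct {
        ptr: [*]const Elem,
        len: usize,
    };
}

test "a slice lowers to a {ptr, len} pair of words" {
    const s: []const u8 = "hello";
    const repr = SliceRepr(u8){ .ptr = s.ptr, .len = s.len };
    try std.testing.expectEqual(s.len, repr.len);
    // Two pointer-sized words: the 4-byte (wasm32) and 8-byte (wasm64)
    // loads in `isNull` above read the pointer word to test for null.
    try std.testing.expectEqual(2 * @sizeOf(usize), @sizeOf(SliceRepr(u8)));
}
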
@@ -4479,7 +4480,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { _ = try func.load(ptr, Type.usize, 0); } else { try func.lowerToStack(ptr); @@ -4518,7 +4519,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const index = try func.resolveInst(bin_op.rhs); // load pointer onto the stack - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { _ = try func.load(ptr, Type.usize, 0); } else { try func.lowerToStack(ptr); @@ -5441,7 +5442,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue { - if (ptr_ty.isSlice()) { + const mod = func.bin_file.base.options.module.?; + if (ptr_ty.isSlice(mod)) { return func.slicePtr(ptr); } else { return ptr; diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 7dd4425c01ea..c7819b0fa654 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -60,7 +60,7 @@ pub fn classifyType(ty: Type, mod: *const Module) [2]Class { return direct; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return direct; }, .Union => { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 3e0ca4831b30..ad67a0db3d78 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8688,7 +8688,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -8781,7 +8781,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; diff --git a/src/codegen.zig b/src/codegen.zig index a3ecf88d50de..c9e2c6c265da 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -317,11 +317,11 @@ pub fn generateSymbol( switch (target.ptrBitWidth()) { 32 => { mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); - if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 4); + if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 4); }, 64 => { mem.writeInt(u64, try code.addManyAsArray(8), 0, endian); - if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 8); + if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 8); }, else => unreachable, } @@ -845,7 +845,7 @@ fn lowerParentPtr( debug_output, reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { .Pointer => offset: { - assert(field_ptr.container_ty.isSlice()); + assert(field_ptr.container_ty.isSlice(mod)); var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, @@ -946,7 +946,7 @@ fn lowerDeclRef( ) CodeGenError!Result { const target = 
bin_file.options.target; const mod = bin_file.options.module.?; - if (typed_value.ty.isSlice()) { + if (typed_value.ty.isSlice(mod)) { // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); @@ -1174,7 +1174,7 @@ pub fn genTypedValue( const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); - if (!typed_value.ty.isSlice()) { + if (!typed_value.ty.isSlice(mod)) { if (typed_value.val.castTag(.variable)) |payload| { return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 327ccb011900..cd4f36e5740d 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -556,7 +556,7 @@ pub const DeclGen = struct { if (decl.val.castTag(.variable)) |var_payload| try dg.renderFwdDecl(decl_index, var_payload.data); - if (ty.isSlice()) { + if (ty.isSlice(mod)) { if (location == .StaticInitializer) { try writer.writeByte('{'); } else { @@ -603,7 +603,7 @@ pub const DeclGen = struct { fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { const mod = dg.module; - if (!ptr_ty.isSlice()) { + if (!ptr_ty.isSlice(mod)) { try writer.writeByte('('); try dg.renderType(writer, ptr_ty); try writer.writeByte(')'); @@ -776,7 +776,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, repr_ty, Value.undef, .FunctionArgument); return writer.writeByte(')'); }, - .Pointer => if (ty.isSlice()) { + .Pointer => if (ty.isSlice(mod)) { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); @@ -1045,7 +1045,7 @@ pub const DeclGen = struct { return; }, .Pointer => switch (val.tag()) { - .null_value, .zero => if (ty.isSlice()) { + .null_value, .zero => if (ty.isSlice(mod)) { var slice_pl = Value.Payload.Slice{ .base = .{ .tag = .slice }, .data = .{ .ptr = val, .len = Value.undef }, @@ -5073,7 +5073,7 @@ fn airIsNull( TypedValue{ .ty = optional_ty, .val = Value.null } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) TypedValue{ .ty = payload_ty, .val = Value.zero } - else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload(mod)) rhs: { + else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf); break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; @@ -5864,6 +5864,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const operand = try f.resolveInst(un_op); @@ -5877,7 +5878,7 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(" = ("); try f.renderType(writer, inst_ty); try writer.writeByte(')'); - if (operand_ty.isSlice()) { + if (operand_ty.isSlice(mod)) { try f.writeCValueMember(writer, operand, .{ .identifier = "len" }); } else { try f.writeCValue(writer, operand, .Other); @@ -6272,7 +6273,8 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa } fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void { - if (ptr_ty.isSlice()) { + const mod = f.object.dg.module; + if (ptr_ty.isSlice(mod)) { try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" }); } else { try f.writeCValue(writer, ptr, .FunctionArgument); diff --git 
a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 1a092dff6925..5d9345c84f00 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1636,7 +1636,7 @@ pub const Object = struct { return ptr_di_ty; } - if (ty.isSlice()) { + if (ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = ty.slicePtrFieldType(&buf); const len_ty = Type.usize; @@ -2833,7 +2833,7 @@ pub const DeclGen = struct { }, .Bool => return dg.context.intType(1), .Pointer => { - if (t.isSlice()) { + if (t.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_type = t.slicePtrFieldType(&buf); @@ -4110,7 +4110,7 @@ pub const DeclGen = struct { } }, .Pointer => { - assert(parent_ty.isSlice()); + assert(parent_ty.isSlice(mod)); const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(field_index, .False), @@ -4184,7 +4184,7 @@ pub const DeclGen = struct { decl_index: Module.Decl.Index, ) Error!*llvm.Value { const mod = self.module; - if (tv.ty.isSlice()) { + if (tv.ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = tv.ty.slicePtrFieldType(&buf); var slice_len: Value.Payload.U64 = .{ @@ -5794,7 +5794,8 @@ pub const FuncGen = struct { } fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { - if (ty.isSlice()) { + const mod = fg.dg.module; + if (ty.isSlice(mod)) { return fg.builder.buildExtractValue(ptr, 0, ""); } else { return ptr; @@ -6669,7 +6670,7 @@ pub const FuncGen = struct { self.builder.buildLoad(optional_llvm_ty, operand, "") else operand; - if (payload_ty.isSlice()) { + if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf)); @@ -10864,7 +10865,7 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; var buf: Type.Payload.ElemType = undefined; - if (ty.isSlice() or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice())) { + if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice(mod))) { it.llvm_index += 1; return .slice; } else if (isByRef(ty, mod)) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 417a8035b53e..f69c6cb31746 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2980,12 +2980,12 @@ pub const DeclGen = struct { // Pointer payload represents nullability: pointer or slice. 
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = if (payload_ty.isSlice()) + const ptr_ty = if (payload_ty.isSlice(mod)) payload_ty.slicePtrFieldType(&ptr_buf) else payload_ty; - const ptr_id = if (payload_ty.isSlice()) + const ptr_id = if (payload_ty.isSlice(mod)) try self.extractField(Type.bool, operand_id, 0) else operand_id; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 9c6e54ea9846..682431203e0d 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -258,7 +258,7 @@ pub const DeclState = struct { } }, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { // Slices are structs: struct { .ptr = *, .len = N } const ptr_bits = target.ptrBitWidth(); const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8)); diff --git a/src/type.zig b/src/type.zig index 0eff51251d1f..f05c5e15e8f5 100644 --- a/src/type.zig +++ b/src/type.zig @@ -229,7 +229,7 @@ pub const Type = struct { .Frame, => false, - .Pointer => !ty.isSlice() and (is_equality_cmp or ty.isCPtr()), + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()), .Optional => { if (!is_equality_cmp) return false; var buf: Payload.ElemType = undefined; @@ -369,209 +369,212 @@ pub const Type = struct { } pub fn ptrInfo(self: Type) Payload.Pointer { - switch (self.tag()) { - .single_const_pointer_to_comptime_int => return .{ .data = .{ - .pointee_type = Type.comptime_int, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .const_slice_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .const_slice_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .many_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - 
.many_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .c_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = false, - .@"volatile" = false, - .size = .C, - } }, - .c_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = true, - .@"volatile" = false, - .size = .C, - } }, - .const_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .mut_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Slice, - } }, - - .pointer => return self.castTag(.pointer).?.*, - - .optional_single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .optional_single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrInfo(); - }, + switch (self.ip_index) { + .none => switch (self.tag()) { + .single_const_pointer_to_comptime_int => return .{ .data = .{ + .pointee_type = Type.comptime_int, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .One, + } }, + .const_slice_u8 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Slice, + } }, + .const_slice_u8_sentinel_0 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = Value.zero, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Slice, + } }, + .single_const_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = 
false, + .mutable = false, + .@"volatile" = false, + .size = .One, + } }, + .single_mut_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .One, + } }, + .many_const_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Many, + } }, + .manyptr_const_u8 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Many, + } }, + .manyptr_const_u8_sentinel_0 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = Value.zero, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Many, + } }, + .many_mut_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .Many, + } }, + .manyptr_u8 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .Many, + } }, + .c_const_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = true, + .mutable = false, + .@"volatile" = false, + .size = .C, + } }, + .c_mut_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = true, + .mutable = true, + .@"volatile" = false, + .size = .C, + } }, + .const_slice => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Slice, + } }, + .mut_slice => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .Slice, + } }, + + .pointer => return self.castTag(.pointer).?.*, + + .optional_single_mut_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .One, + } }, + .optional_single_const_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .One, + } }, + .optional => { + var buf: Payload.ElemType = undefined; + const 
child_type = self.optionalChild(&buf); + return child_type.ptrInfo(); + }, - else => unreachable, + else => unreachable, + }, + else => @panic("TODO"), } } @@ -3712,17 +3715,23 @@ pub const Type = struct { }; } - pub fn isSlice(self: Type) bool { - return switch (self.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => true, + pub fn isSlice(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .const_slice, + .mut_slice, + .const_slice_u8, + .const_slice_u8_sentinel_0, + => true, - .pointer => self.castTag(.pointer).?.data.size == .Slice, + .pointer => ty.castTag(.pointer).?.data.size == .Slice, - else => false, + else => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.size == .Slice, + else => false, + }, }; } diff --git a/src/value.zig b/src/value.zig index 396aab2012f7..cbf18c672c43 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1144,7 +1144,7 @@ pub const Value = struct { }, }, .Pointer => { - if (ty.isSlice()) return error.IllDefinedMemoryLayout; + if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; if (val.isDeclRef()) return error.ReinterpretDeclRef; return val.writeToMemory(Type.usize, mod, buffer); }, @@ -1261,7 +1261,7 @@ pub const Value = struct { }, }, .Pointer => { - assert(!ty.isSlice()); // No well defined layout. + assert(!ty.isSlice(mod)); // No well defined layout. if (val.isDeclRef()) return error.ReinterpretDeclRef; return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); }, @@ -1381,7 +1381,7 @@ pub const Value = struct { return Value.initPayload(&payload.base); }, .Pointer => { - assert(!ty.isSlice()); // No well defined layout. + assert(!ty.isSlice(mod)); // No well defined layout. return readFromMemory(Type.usize, mod, buffer, arena); }, .Optional => { @@ -1478,7 +1478,7 @@ pub const Value = struct { }, }, .Pointer => { - assert(!ty.isSlice()); // No well defined layout. + assert(!ty.isSlice(mod)); // No well defined layout. 
return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena); }, .Optional => { From 9d422bff18dbb92d3a6b8705c3dae7404a34bba6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 20:40:54 -0700 Subject: [PATCH 018/205] stage2: move all integer types to InternPool --- src/AstGen.zig | 23 ++- src/Sema.zig | 52 ----- src/arch/aarch64/CodeGen.zig | 22 +-- src/arch/arm/CodeGen.zig | 24 +-- src/arch/sparc64/CodeGen.zig | 6 +- src/arch/wasm/CodeGen.zig | 18 +- src/codegen/c/type.zig | 2 +- src/type.zig | 358 +++++------------------------------ 8 files changed, 106 insertions(+), 399 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index 6461b11d8075..edd609912787 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -10271,6 +10271,8 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.i32_type), as_ty | @enumToInt(Zir.Inst.Ref.u64_type), as_ty | @enumToInt(Zir.Inst.Ref.i64_type), + as_ty | @enumToInt(Zir.Inst.Ref.u128_type), + as_ty | @enumToInt(Zir.Inst.Ref.i128_type), as_ty | @enumToInt(Zir.Inst.Ref.usize_type), as_ty | @enumToInt(Zir.Inst.Ref.isize_type), as_ty | @enumToInt(Zir.Inst.Ref.c_char_type), @@ -10296,11 +10298,30 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.comptime_float_type), as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyframe_type), as_ty | @enumToInt(Zir.Inst.Ref.null_type), as_ty | @enumToInt(Zir.Inst.Ref.undefined_type), + as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), + as_ty | @enumToInt(Zir.Inst.Ref.atomic_order_type), + as_ty | @enumToInt(Zir.Inst.Ref.atomic_rmw_op_type), + as_ty | @enumToInt(Zir.Inst.Ref.calling_convention_type), + as_ty | @enumToInt(Zir.Inst.Ref.address_space_type), + as_ty | @enumToInt(Zir.Inst.Ref.float_mode_type), + as_ty | @enumToInt(Zir.Inst.Ref.reduce_op_type), + as_ty | @enumToInt(Zir.Inst.Ref.call_modifier_type), + as_ty | @enumToInt(Zir.Inst.Ref.prefetch_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.export_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.extern_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.type_info_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), - as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), + as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type), + as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type), + as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type), as_comptime_int | @enumToInt(Zir.Inst.Ref.zero), as_comptime_int | @enumToInt(Zir.Inst.Ref.one), as_bool | @enumToInt(Zir.Inst.Ref.bool_true), diff --git a/src/Sema.zig b/src/Sema.zig index 0eecda5d1666..738971930108 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -31478,19 +31478,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }; return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -32971,19 +32958,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }; switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .error_set_single, .error_set, 
.error_set_merged, @@ -33175,19 +33149,6 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } switch (ty.tag()) { - .u1 => return .u1_type, - .u8 => return .u8_type, - .i8 => return .i8_type, - .u16 => return .u16_type, - .u29 => return .u29_type, - .i16 => return .i16_type, - .u32 => return .u32_type, - .i32 => return .i32_type, - .u64 => return .u64_type, - .i64 => return .i64_type, - .u128 => return .u128_type, - .i128 => return .i128_type, - .manyptr_u8 => return .manyptr_u8_type, .manyptr_const_u8 => return .manyptr_const_u8_type, .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, @@ -33617,19 +33578,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } } return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 7098cf3f321b..503bbdbb02b1 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -2577,7 +2577,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; }, @@ -2720,7 +2720,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits <= 64) { @@ -2860,7 +2860,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else return self.fail("TODO implement mul_with_overflow for integers > u64/i64", .{}); @@ -2993,7 +2993,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else { @@ -3780,7 +3780,7 @@ fn genInlineMemset( const val_reg = switch (val) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.u8), val), + else => try self.copyToTmpRegister(Type.u8, val), }; const val_reg_lock = self.register_manager.lockReg(val_reg); defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -4330,7 +4330,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } else if 
(self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .got, .sym_index = sym_index, @@ -4339,7 +4339,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .got, .sym_index = sym_index, @@ -4379,7 +4379,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier }); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .import, .sym_index = sym_index, @@ -4536,7 +4536,7 @@ fn cmp( var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - break :blk Type.initTag(.u1); + break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { @@ -4546,9 +4546,9 @@ fn cmp( .Float => return self.fail("TODO ARM cmp floats", .{}), .Enum => lhs_ty.intTagType(), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, else => unreachable, }; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index bf94cf55a0e6..55ec0d412593 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1637,7 +1637,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits == 32) { @@ -1750,7 +1750,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits <= 32) { @@ -1848,7 +1848,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); // strb rdlo, [...] 
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .register = rdlo }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .register = rdlo }); break :result MCValue{ .stack_offset = stack_offset }; } else { @@ -1983,7 +1983,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else { @@ -4086,7 +4086,7 @@ fn genInlineMemset( const val_reg = switch (val) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.u8), val), + else => try self.copyToTmpRegister(Type.u8, val), }; const val_reg_lock = self.register_manager.lockReg(val_reg); defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -4485,7 +4485,7 @@ fn cmp( var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - break :blk Type.initTag(.u1); + break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { @@ -4495,9 +4495,9 @@ fn cmp( .Float => return self.fail("TODO ARM cmp floats", .{}), .Enum => lhs_ty.intTagType(), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, else => unreachable, }; @@ -5367,7 +5367,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro 1, 4 => { const offset = if (math.cast(u12, stack_offset)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -5390,7 +5390,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ .tag = .strh, @@ -5769,7 +5769,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I 1, 4 => { const offset = if (math.cast(u12, stack_offset)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -5789,7 +5789,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ 
.immediate = stack_offset })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ .tag = .strh, diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index a519b7323502..c565b6dc237b 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1436,14 +1436,14 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { .Vector => unreachable, // Handled by cmp_vector. .Enum => lhs_ty.intTagType(), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, .Optional => blk: { var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - break :blk Type.initTag(.u1); + break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index bb3f1f769db7..7fc5dbc8255d 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4272,7 +4272,7 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const non_null_bit = try func.allocStack(Type.initTag(.u1)); + const non_null_bit = try func.allocStack(Type.u1); try func.emitWValue(non_null_bit); try func.addImm32(1); try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 }); @@ -5195,7 +5195,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: // We store the final result in here that will be validated // if the optional is truly equal. 
- var result = try func.ensureAllocLocal(Type.initTag(.i32)); + var result = try func.ensureAllocLocal(Type.i32); defer result.free(func); try func.startBlock(.block, wasm.block_empty); @@ -5658,7 +5658,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); + try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } @@ -5717,13 +5717,13 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, break :blk WValue{ .stack = {} }; }; - var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1)); + var overflow_local = try overflow_bit.toLocal(func, Type.u1); defer overflow_local.free(func); const result_ptr = try func.allocStack(result_ty); try func.store(result_ptr, high_op_res, Type.u64, 0); try func.store(result_ptr, tmp_op, Type.u64, 8); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), 16); + try func.store(result_ptr, overflow_local, Type.u1, 16); return result_ptr; } @@ -5774,13 +5774,13 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const shr = try func.binOp(result, rhs_final, lhs_ty, .shr); break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq); }; - var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1)); + var overflow_local = try overflow_bit.toLocal(func, Type.u1); defer overflow_local.free(func); const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); + try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } @@ -5800,7 +5800,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // We store the bit if it's overflowed or not in this. As it's zero-initialized // we only need to update it if an overflow (or underflow) occurred. 
- var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1)); + var overflow_bit = try func.ensureAllocLocal(Type.u1); defer overflow_bit.free(func); const int_info = lhs_ty.intInfo(mod); @@ -5955,7 +5955,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, bin_op_local, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); - try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset); + try func.store(result_ptr, overflow_bit, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index d2487536701b..27fa997fd34a 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1471,7 +1471,7 @@ pub const CType = extern union { else info.pointee_type; - if (if (info.size == .C and pointee_ty.tag() == .u8) + if (if (info.size == .C and pointee_ty.ip_index == .u8_type) Tag.char.toIndex() else try lookup.typeToIndex(pointee_ty, .forward)) |child_idx| diff --git a/src/type.zig b/src/type.zig index f05c5e15e8f5..868ae4231be8 100644 --- a/src/type.zig +++ b/src/type.zig @@ -107,20 +107,6 @@ pub const Type = struct { } } switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => return .Int, - .error_set, .error_set_single, .error_set_inferred, @@ -589,26 +575,6 @@ pub const Type = struct { if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => { - if (b.zigTypeTag(mod) != .Int) return false; - if (b.isNamedInt()) return false; - const info_a = a.intInfo(mod); - const info_b = b.intInfo(mod); - return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits; - }, - .error_set_inferred => { // Inferred error sets are only equal if both are inferred // and they share the same pointer. @@ -926,26 +892,6 @@ pub const Type = struct { return; } switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => { - // Arbitrary sized integers. 
- std.hash.autoHash(hasher, std.builtin.TypeId.Int); - const info = ty.intInfo(mod); - std.hash.autoHash(hasher, info.signedness); - std.hash.autoHash(hasher, info.bits); - }, - .error_set, .error_set_single, .error_set_merged, @@ -1183,18 +1129,6 @@ pub const Type = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -1435,20 +1369,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => return writer.writeAll(@tagName(t)), - .empty_struct, .empty_struct_literal => return writer.writeAll("struct {}"), .@"struct" => { @@ -1775,20 +1695,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => try writer.writeAll(@tagName(t)), - .empty_struct_literal => try writer.writeAll("@TypeOf(.{})"), .empty_struct => { @@ -2057,16 +1963,6 @@ pub const Type = struct { pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { - .u1 => return Value{ .ip_index = .u1_type, .legacy = undefined }, - .u8 => return Value{ .ip_index = .u8_type, .legacy = undefined }, - .i8 => return Value{ .ip_index = .i8_type, .legacy = undefined }, - .u16 => return Value{ .ip_index = .u16_type, .legacy = undefined }, - .u29 => return Value{ .ip_index = .u29_type, .legacy = undefined }, - .i16 => return Value{ .ip_index = .i16_type, .legacy = undefined }, - .u32 => return Value{ .ip_index = .u32_type, .legacy = undefined }, - .i32 => return Value{ .ip_index = .i32_type, .legacy = undefined }, - .u64 => return Value{ .ip_index = .u64_type, .legacy = undefined }, - .i64 => return Value{ .ip_index = .i64_type, .legacy = undefined }, .single_const_pointer_to_comptime_int => return Value{ .ip_index = .single_const_pointer_to_comptime_int_type, .legacy = undefined }, .const_slice_u8 => return Value{ .ip_index = .const_slice_u8_type, .legacy = undefined }, .const_slice_u8_sentinel_0 => return Value{ .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined }, @@ -2162,19 +2058,6 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .const_slice_u8, .const_slice_u8_sentinel_0, .array_u8_sentinel_0, @@ -2404,19 +2287,6 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -2752,10 +2622,6 @@ pub const Type = struct { else => null, }; switch (ty.tag()) { - .u1, - .u8, - .i8, - .array_u8_sentinel_0, .array_u8, .@"opaque", @@ -2806,12 +2672,6 @@ pub const Type = struct { return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; }, - .i16, .u16 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(16, target) }, - .u29 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(29, target) }, - .i32, .u32 => return AbiAlignmentAdvanced{ .scalar 
= intAbiAlignment(32, target) }, - .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) }, - .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) }, - .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); @@ -3208,11 +3068,6 @@ pub const Type = struct { return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); }, - .u1, - .u8, - .i8, - => return AbiSizeAdvanced{ .scalar = 1 }, - .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data }, .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, .array => { @@ -3293,12 +3148,6 @@ pub const Type = struct { .error_set_single, => return AbiSizeAdvanced{ .scalar = 2 }, - .i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) }, - .u29 => return AbiSizeAdvanced{ .scalar = intAbiSize(29, target) }, - .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) }, - .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) }, - .u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) }, - .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); @@ -3497,14 +3346,6 @@ pub const Type = struct { .inferred_alloc_mut => unreachable, .@"opaque" => unreachable, - .u1 => return 1, - .u8, .i8 => return 8, - .i16, .u16 => return 16, - .u29 => return 29, - .i32, .u32 => return 32, - .i64, .u64 => return 64, - .u128, .i128 => return 128, - .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; if (struct_obj.layout != .Packed) { @@ -4398,47 +4239,25 @@ pub const Type = struct { /// Returns true if and only if the type is a fixed-width, signed integer. pub fn isSignedInt(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.signedness == .signed, - .simple_type => |s| return switch (s) { - .c_char, .isize, .c_short, .c_int, .c_long, .c_longlong => true, + return switch (ty.ip_index) { + .c_char_type, .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| int_type.signedness == .signed, else => false, }, - else => return false, - }; - return switch (ty.tag()) { - .i8, - .i16, - .i32, - .i64, - .i128, - => true, - - else => false, }; } /// Returns true if and only if the type is a fixed-width, unsigned integer. 
pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.signedness == .unsigned, - .simple_type => |s| return switch (s) { - .usize, .c_ushort, .c_uint, .c_ulong, .c_ulonglong => true, + return switch (ty.ip_index) { + .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| int_type.signedness == .unsigned, else => false, }, - else => return false, - }; - return switch (ty.tag()) { - .u1, - .u8, - .u16, - .u29, - .u32, - .u64, - .u128, - => true, - - else => false, }; } @@ -4459,19 +4278,6 @@ pub const Type = struct { while (true) switch (ty.ip_index) { .none => switch (ty.tag()) { - .u1 => return .{ .signedness = .unsigned, .bits = 1 }, - .u8 => return .{ .signedness = .unsigned, .bits = 8 }, - .i8 => return .{ .signedness = .signed, .bits = 8 }, - .u16 => return .{ .signedness = .unsigned, .bits = 16 }, - .i16 => return .{ .signedness = .signed, .bits = 16 }, - .u29 => return .{ .signedness = .unsigned, .bits = 29 }, - .u32 => return .{ .signedness = .unsigned, .bits = 32 }, - .i32 => return .{ .signedness = .signed, .bits = 32 }, - .u64 => return .{ .signedness = .unsigned, .bits = 64 }, - .i64 => return .{ .signedness = .signed, .bits = 64 }, - .u128 => return .{ .signedness = .unsigned, .bits = 128 }, - .i128 => return .{ .signedness = .signed, .bits = 128 }, - .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, .enum_simple => { @@ -4664,50 +4470,34 @@ pub const Type = struct { } pub fn isNumeric(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => true, - .simple_type => |s| return switch (s) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_int, - .comptime_float, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - => true, + return switch (ty.ip_index) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_int_type, + .comptime_float_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => true, + + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => true, else => false, }, - else => false, - }; - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => true, - - else => false, }; } @@ -4785,19 +4575,6 @@ pub const Type = struct { }; while (true) switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .error_union, .error_set_single, .error_set, @@ -4995,19 +4772,6 @@ pub const Type = struct { }; return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -5764,19 +5528,6 @@ pub const Type = struct { /// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`. pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. 
- u1, - u8, - i8, - u16, - i16, - u29, - u32, - i32, - u64, - i64, - u128, - i128, - manyptr_u8, manyptr_const_u8, manyptr_const_u8_sentinel_0, @@ -5839,19 +5590,6 @@ pub const Type = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .single_const_pointer_to_comptime_int, .anyerror_void_error_union, .const_slice_u8, @@ -6203,19 +5941,19 @@ pub const Type = struct { }; }; - pub const @"u1" = initTag(.u1); - pub const @"u8" = initTag(.u8); - pub const @"u16" = initTag(.u16); - pub const @"u29" = initTag(.u29); - pub const @"u32" = initTag(.u32); - pub const @"u64" = initTag(.u64); - pub const @"u128" = initTag(.u128); - - pub const @"i8" = initTag(.i8); - pub const @"i16" = initTag(.i16); - pub const @"i32" = initTag(.i32); - pub const @"i64" = initTag(.i64); - pub const @"i128" = initTag(.i128); + pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; + pub const @"u8": Type = .{ .ip_index = .u8_type, .legacy = undefined }; + pub const @"u16": Type = .{ .ip_index = .u16_type, .legacy = undefined }; + pub const @"u29": Type = .{ .ip_index = .u29_type, .legacy = undefined }; + pub const @"u32": Type = .{ .ip_index = .u32_type, .legacy = undefined }; + pub const @"u64": Type = .{ .ip_index = .u64_type, .legacy = undefined }; + pub const @"u128": Type = .{ .ip_index = .u128_type, .legacy = undefined }; + + pub const @"i8": Type = .{ .ip_index = .i8_type, .legacy = undefined }; + pub const @"i16": Type = .{ .ip_index = .i16_type, .legacy = undefined }; + pub const @"i32": Type = .{ .ip_index = .i32_type, .legacy = undefined }; + pub const @"i64": Type = .{ .ip_index = .i64_type, .legacy = undefined }; + pub const @"i128": Type = .{ .ip_index = .i128_type, .legacy = undefined }; pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined }; pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined }; From 5e636643d2a36c777a607b65cfd1abbb1822ad1e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 May 2023 20:30:25 -0700 Subject: [PATCH 019/205] stage2: move many Type encodings to InternPool Notably, `vector`. Additionally, all alternate encodings of `pointer`, `optional`, and `array`. 
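As a rough illustration, assuming `mod: *Module` (this snippet is not part of the diff), the new `Module` helpers added below construct these types through the InternPool rather than as arena-allocated payloads:

    const u8_ty = try mod.intType(.unsigned, 8);
    const vec_ty = try mod.vectorType(.{ .len = 4, .child = u8_ty.ip_index });
    const arr_ty = try mod.arrayType(.{
        .len = 16,
        .child = u8_ty.ip_index,
        .sentinel = .none,
    });
    const opt_ty = try mod.optionalType(u8_ty.ip_index);
    const slice_ty = try mod.ptrType(.{
        .elem_type = u8_ty.ip_index,
        .size = .Slice,
        .is_const = true,
    });

Each helper interns an `InternPool.Key` and wraps the resulting `Index` in a `Type`, so structurally identical types map to the same index.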
--- src/Air.zig | 13 +- src/InternPool.zig | 92 +- src/Liveness.zig | 8 +- src/Liveness/Verify.zig | 2 +- src/Module.zig | 41 +- src/Sema.zig | 1075 ++++++++++----------- src/TypedValue.zig | 32 +- src/arch/aarch64/CodeGen.zig | 80 +- src/arch/arm/CodeGen.zig | 83 +- src/arch/riscv64/CodeGen.zig | 14 +- src/arch/sparc64/CodeGen.zig | 37 +- src/arch/wasm/CodeGen.zig | 168 ++-- src/arch/x86_64/CodeGen.zig | 314 +++--- src/arch/x86_64/abi.zig | 6 +- src/codegen.zig | 36 +- src/codegen/c.zig | 203 ++-- src/codegen/c/type.zig | 9 +- src/codegen/llvm.zig | 326 +++---- src/codegen/spirv.zig | 64 +- src/codegen/spirv/Module.zig | 3 +- src/link/Dwarf.zig | 11 +- src/link/Wasm.zig | 4 +- src/print_air.zig | 6 +- src/type.zig | 1770 +++++++++------------------------- src/value.zig | 208 ++-- 25 files changed, 1834 insertions(+), 2771 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 4124788605b0..64212d3b9af3 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1375,7 +1375,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .bool_to_int => return Type.u1, - .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), + .tag_name, .error_name => return Type.const_slice_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); @@ -1384,18 +1384,21 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .slice_elem_val, .ptr_elem_val, .array_elem_val => { const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, .atomic_load => { const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, .atomic_rmw => { const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, - .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand, ip).childType(), + .reduce, .reduce_optimized => { + const operand_ty = air.typeOf(datas[inst].reduce.operand, ip); + return ip.indexToKey(operand_ty.ip_index).vector_type.child.toType(); + }, .mul_add => return air.typeOf(datas[inst].pl_op.operand, ip), .select => { diff --git a/src/InternPool.zig b/src/InternPool.zig index 3ecc18c4266e..295a694e2aab 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -31,28 +31,10 @@ const KeyAdapter = struct { pub const Key = union(enum) { int_type: IntType, - ptr_type: struct { - elem_type: Index, - sentinel: Index = .none, - alignment: u16 = 0, - size: std.builtin.Type.Pointer.Size, - is_const: bool = false, - is_volatile: bool = false, - is_allowzero: bool = false, - address_space: std.builtin.AddressSpace = .generic, - }, - array_type: struct { - len: u64, - child: Index, - sentinel: Index, - }, - vector_type: struct { - len: u32, - child: Index, - }, - optional_type: struct { - payload_type: Index, - }, + ptr_type: PtrType, + array_type: ArrayType, + vector_type: VectorType, + opt_type: Index, error_union_type: struct { error_set_type: Index, payload_type: Index, @@ -87,6 +69,47 @@ pub const Key = union(enum) { pub const IntType = std.builtin.Type.Int; + pub const PtrType = struct { + elem_type: Index, + sentinel: Index = .none, + /// If zero use pointee_type.abiAlignment() + /// When creating pointer types, if alignment is equal to pointee type + /// abi alignment, this value should be set to 0 instead. 
+ alignment: u16 = 0, + /// If this is non-zero it means the pointer points to a sub-byte + /// range of data, which is backed by a "host integer" with this + /// number of bytes. + /// When host_size=pointee_abi_size and bit_offset=0, this must be + /// represented with host_size=0 instead. + host_size: u16 = 0, + bit_offset: u16 = 0, + vector_index: VectorIndex = .none, + size: std.builtin.Type.Pointer.Size = .One, + is_const: bool = false, + is_volatile: bool = false, + is_allowzero: bool = false, + /// See src/target.zig defaultAddressSpace function for how to obtain + /// an appropriate value for this field. + address_space: std.builtin.AddressSpace = .generic, + + pub const VectorIndex = enum(u32) { + none = std.math.maxInt(u32), + runtime = std.math.maxInt(u32) - 1, + _, + }; + }; + + pub const ArrayType = struct { + len: u64, + child: Index, + sentinel: Index, + }; + + pub const VectorType = struct { + len: u32, + child: Index, + }; + pub fn hash32(key: Key) u32 { return @truncate(u32, key.hash64()); } @@ -106,7 +129,7 @@ pub const Key = union(enum) { .ptr_type, .array_type, .vector_type, - .optional_type, + .opt_type, .error_union_type, .simple_type, .simple_value, @@ -159,8 +182,8 @@ pub const Key = union(enum) { const b_info = b.vector_type; return std.meta.eql(a_info, b_info); }, - .optional_type => |a_info| { - const b_info = b.optional_type; + .opt_type => |a_info| { + const b_info = b.opt_type; return std.meta.eql(a_info, b_info); }, .error_union_type => |a_info| { @@ -220,7 +243,7 @@ pub const Key = union(enum) { .ptr_type, .array_type, .vector_type, - .optional_type, + .opt_type, .error_union_type, .simple_type, .struct_type, @@ -630,6 +653,7 @@ pub const Tag = enum(u8) { /// data is payload to Vector. type_vector, /// A fully explicitly specified pointer type. + /// TODO actually this is missing some stuff like bit_offset /// data is payload to Pointer. type_pointer, /// An optional type. 
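To make the sub-byte pointer fields concrete, here is a hedged example key (the specific values are illustrative, not from the diff): a constant pointer to a `u1` that lives at bit 3 of a one-byte host integer, as for a field of a packed struct backed by a `u8`.

    const key: InternPool.Key = .{ .ptr_type = .{
        .elem_type = .u1_type,
        .host_size = 1, // the backing host integer is one byte wide
        .bit_offset = 3, // the field starts at bit 3 of that host integer
        .is_const = true,
    } };

A byte-aligned pointer to a whole object keeps both `host_size` and `bit_offset` at 0, per the doc comments above.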
@@ -893,7 +917,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }; }, - .type_optional => .{ .optional_type = .{ .payload_type = @intToEnum(Index, data) } }, + .type_optional => .{ .opt_type = @intToEnum(Index, data) }, .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), @@ -971,10 +995,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), }); }, - .optional_type => |optional_type| { + .opt_type => |opt_type| { ip.items.appendAssumeCapacity(.{ .tag = .type_optional, - .data = @enumToInt(optional_type.payload_type), + .data = @enumToInt(opt_type), }); }, .error_union_type => |error_union_type| { @@ -1192,3 +1216,13 @@ test "basic usage" { } }); try std.testing.expect(another_array_i32 == array_i32); } + +pub fn childType(ip: InternPool, i: Index) Index { + return switch (ip.indexToKey(i)) { + .ptr_type => |ptr_type| ptr_type.elem_type, + .vector_type => |vector_type| vector_type.child, + .array_type => |array_type| array_type.child, + .opt_type => |child| child, + else => unreachable, + }; +} diff --git a/src/Liveness.zig b/src/Liveness.zig index 01fbee9e3608..19659940af3c 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -225,6 +225,7 @@ pub fn categorizeOperand( air: Air, inst: Air.Inst.Index, operand: Air.Inst.Index, + ip: InternPool, ) OperandCategory { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); @@ -534,7 +535,7 @@ pub fn categorizeOperand( .aggregate_init => { const ty_pl = air_datas[inst].ty_pl; const aggregate_ty = air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { @@ -625,7 +626,7 @@ pub fn categorizeOperand( var operand_live: bool = true; for (air.extra[cond_extra.end..][0..2]) |cond_inst| { - if (l.categorizeOperand(air, cond_inst, operand) == .tomb) + if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb) operand_live = false; switch (air_tags[cond_inst]) { @@ -872,6 +873,7 @@ fn analyzeInst( data: *LivenessPassData(pass), inst: Air.Inst.Index, ) Allocator.Error!void { + const ip = a.intern_pool; const inst_tags = a.air.instructions.items(.tag); const inst_datas = a.air.instructions.items(.data); @@ -1140,7 +1142,7 @@ fn analyzeInst( .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index e05f1814ceb9..7059fec5074b 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -325,7 +325,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); var bt = self.liveness.iterateBigTomb(inst); diff --git a/src/Module.zig b/src/Module.zig index 5c84b123c121..67ca91266c3e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5805,7 +5805,7 @@ pub fn analyzeFnBody(mod: 
*Module, func: *Fn, arena: Allocator) SemaError!Air { // is unused so it just has to be a no-op. sema.air_instructions.set(ptr_inst.*, .{ .tag = .alloc, - .data = .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int) }, + .data = .{ .ty = Type.single_const_pointer_to_comptime_int }, }); } } @@ -6545,7 +6545,7 @@ pub fn populateTestFunctions( } const decl = mod.declPtr(decl_index); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); + const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).childType(mod); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions @@ -6575,7 +6575,7 @@ pub fn populateTestFunctions( errdefer name_decl_arena.deinit(); const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len), + .ty = try Type.array(name_decl_arena.allocator(), bytes.len, null, Type.u8, mod), .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), }); try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); @@ -6609,7 +6609,12 @@ pub fn populateTestFunctions( { // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. - const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); + const new_ty = try Type.ptr(arena, mod, .{ + .size = .Slice, + .pointee_type = try tmp_test_fn_ty.copy(arena), + .mutable = false, + .@"addrspace" = .generic, + }); const new_var = try gpa.create(Var); errdefer gpa.destroy(new_var); new_var.* = decl.val.castTag(.variable).?.data.*; @@ -6819,6 +6824,34 @@ pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allo return i.toType(); } +pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type { + const i = try intern(mod, .{ .array_type = info }); + return i.toType(); +} + +pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type { + const i = try intern(mod, .{ .vector_type = info }); + return i.toType(); +} + +pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type { + const i = try intern(mod, .{ .opt_type = child_type }); + return i.toType(); +} + +pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { + const i = try intern(mod, .{ .ptr_type = info }); + return i.toType(); +} + +pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + return ptrType(mod, .{ .elem_type = child_type.ip_index }); +} + +pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } diff --git a/src/Sema.zig b/src/Sema.zig index 738971930108..87df2f23e1ab 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -585,13 +585,18 @@ pub const Block = struct { } fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref { + const sema = block.sema; + const mod = sema.mod; return block.addInst(.{ .tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector, .data = .{ .ty_pl = .{ - .ty = try block.sema.addType( - try 
Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool), + .ty = try sema.addType( + try mod.vectorType(.{ + .len = sema.typeOf(lhs).vectorLen(mod), + .child = .bool_type, + }), ), - .payload = try block.sema.addExtra(Air.VectorCmp{ + .payload = try sema.addExtra(Air.VectorCmp{ .lhs = lhs, .rhs = rhs, .op = Air.VectorCmp.encodeOp(cmp_op), @@ -1760,7 +1765,7 @@ pub fn resolveConstString( reason: []const u8, ) ![]u8 { const air_inst = try sema.resolveInst(zir_ref); - const wanted_type = Type.initTag(.const_slice_u8); + const wanted_type = Type.const_slice_u8; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, reason); return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); @@ -1788,7 +1793,8 @@ fn analyzeAsType( } pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.error_return_trace)) return; assert(!block.is_comptime); var err_trace_block = block.makeSubBlock(); @@ -1798,13 +1804,13 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) // var addrs: [err_return_trace_addr_count]usize = undefined; const err_return_trace_addr_count = 32; - const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, sema.mod); - const addrs_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, addr_arr_ty)); + const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, mod); + const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty)); // var st: StackTrace = undefined; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const st_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty)); + const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true); @@ -2101,11 +2107,10 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { const mod = sema.mod; - const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType() else object_ty; + const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty; if (inner_ty.zigTypeTag(mod) == .Optional) opt: { - var buf: Type.Payload.ElemType = undefined; - const child_ty = inner_ty.optionalChild(&buf); + const child_ty = inner_ty.optionalChild(mod); if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); @@ -2132,7 +2137,7 @@ fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) switch (ty.zigTypeTag(mod)) { .Array => return mem.eql(u8, field_name, "len"), .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); if (ptr_info.size == .Slice) { return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); } 
else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { @@ -2504,6 +2509,7 @@ fn coerceResultPtr( dummy_operand: Air.Inst.Ref, trash_block: *Block, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const target = sema.mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); const pointee_ty = sema.typeOf(dummy_operand); @@ -2547,7 +2553,7 @@ fn coerceResultPtr( return sema.addConstant(ptr_ty, ptr_val); } if (pointee_ty.eql(Type.null, sema.mod)) { - const opt_ty = sema.typeOf(new_ptr).childType(); + const opt_ty = sema.typeOf(new_ptr).childType(mod); const null_inst = try sema.addConstant(opt_ty, Value.null); _ = try block.addBinOp(.store, new_ptr, null_inst); return Air.Inst.Ref.void_value; @@ -3394,7 +3400,7 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer) - operand_ty.childType() + operand_ty.childType(mod) else operand_ty; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; @@ -3430,7 +3436,7 @@ fn indexablePtrLen( const mod = sema.mod; const object_ty = sema.typeOf(object); const is_pointer_to = object_ty.isSinglePointer(mod); - const indexable_ty = if (is_pointer_to) object_ty.childType() else object_ty; + const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try checkIndexable(sema, block, src, indexable_ty); return sema.fieldVal(block, src, object, "len", src); } @@ -3441,9 +3447,10 @@ fn indexablePtrLenOrNone( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); - if (operand_ty.ptrSize() == .Many) return .none; + if (operand_ty.ptrSize(mod) == .Many) return .none; return sema.fieldVal(block, src, operand, "len", src); } @@ -3529,11 +3536,12 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const alloc = try sema.resolveInst(inst_data.operand); const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); const elem_ty = ptr_info.pointee_type; // Detect if all stores to an `.alloc` were comptime-known. 
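The `ptrInfo` migration above follows one shape throughout: pass the `Module` so interned pointer types can be decoded, and receive the description by value instead of through a legacy `.data` payload. A condensed sketch using the names from the surrounding hunks:

    // Before: unwrap an arena-allocated payload.
    //     var ptr_info = alloc_ty.ptrInfo().data;
    // After: the Module grants access to the InternPool.
    var ptr_info = alloc_ty.ptrInfo(mod);
    // Returning by value keeps the mutate-and-rebuild pattern working
    // for both encodings, as in makePtrConst below:
    ptr_info.mutable = false;
    const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);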
@@ -3589,9 +3597,10 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); ptr_info.mutable = false; const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); @@ -3947,13 +3956,13 @@ fn zirArrayBasePtr( const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); + const elem_ty = sema.typeOf(base_ptr).childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, .Struct => if (elem_ty.isTuple()) { @@ -3962,7 +3971,7 @@ fn zirArrayBasePtr( }, else => {}, } - return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirFieldBasePtr( @@ -3976,18 +3985,18 @@ fn zirFieldBasePtr( const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); + const elem_ty = sema.typeOf(base_ptr).childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Struct, .Union => return base_ptr, else => {}, } - return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -4129,7 +4138,7 @@ fn validateArrayInitTy( switch (ty.zigTypeTag(mod)) { .Array => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} array elements; found {d}", .{ array_len, extra.init_count, @@ -4138,7 +4147,7 @@ fn validateArrayInitTy( return; }, .Vector => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} vector elements; found {d}", .{ array_len, extra.init_count, @@ -4148,7 +4157,7 @@ fn validateArrayInitTy( }, .Struct => if (ty.isTuple()) { _ = try sema.resolveTypeFields(ty); - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ array_len, extra.init_count, @@ -4194,7 +4203,7 @@ fn zirValidateStructInit( const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = try 
sema.resolveInst(field_ptr_extra.lhs); - const agg_ty = sema.typeOf(object_ptr).childType(); + const agg_ty = sema.typeOf(object_ptr).childType(mod); switch (agg_ty.zigTypeTag(mod)) { .Struct => return sema.validateStructInit( block, @@ -4350,6 +4359,7 @@ fn validateStructInit( init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const gpa = sema.gpa; // Maps field index to field_ptr index of where it was already initialized. @@ -4425,14 +4435,13 @@ fn validateStructInit( try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); const init = try sema.addConstant(field_ty, default_val); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } if (root_msg) |msg| { if (struct_ty.castTag(.@"struct")) |struct_obj| { - const mod = sema.mod; const fqn = try struct_obj.data.getFullyQualifiedName(mod); defer gpa.free(fqn); try mod.errNoteNonLazy( @@ -4605,7 +4614,7 @@ fn validateStructInit( try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); const init = try sema.addConstant(field_ty, field_values[i]); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } @@ -4624,8 +4633,8 @@ fn zirValidateArrayInit( const first_elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data; const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr); - const array_ty = sema.typeOf(array_ptr).childType(); - const array_len = array_ty.arrayLen(); + const array_ty = sema.typeOf(array_ptr).childType(mod); + const array_len = array_ty.arrayLen(mod); if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) { .Struct => { @@ -4670,10 +4679,10 @@ fn zirValidateArrayInit( // at comptime so we have almost nothing to do here. However, in case of a // sentinel-terminated array, the sentinel will not have been populated by // any ZIR instructions at comptime; we need to do that here. - if (array_ty.sentinel()) |sentinel_val| { + if (array_ty.sentinel(mod)) |sentinel_val| { const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len); const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true); - const sentinel = try sema.addConstant(array_ty.childType(), sentinel_val); + const sentinel = try sema.addConstant(array_ty.childType(mod), sentinel_val); try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store); } return; @@ -4685,7 +4694,7 @@ fn zirValidateArrayInit( // Collect the comptime element values in case the array literal ends up // being comptime-known. 
- const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel()); + const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); const element_vals = try sema.arena.alloc(Value, array_len_s); const opt_opv = try sema.typeHasOnePossibleValue(array_ty); const air_tags = sema.air_instructions.items(.tag); @@ -4784,7 +4793,7 @@ fn zirValidateArrayInit( // Our task is to delete all the `elem_ptr` and `store` instructions, and insert // instead a single `store` to the array_ptr with a comptime struct value. // Also to populate the sentinel value, if any. - if (array_ty.sentinel()) |sentinel_val| { + if (array_ty.sentinel(mod)) |sentinel_val| { element_vals[instrs.len] = sentinel_val; } @@ -4806,13 +4815,13 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr if (operand_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)}); - } else switch (operand_ty.ptrSize()) { + } else switch (operand_ty.ptrSize(mod)) { .One, .C => {}, .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(sema.mod)}), .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(sema.mod)}), } - if ((try sema.typeHasOnePossibleValue(operand_ty.childType())) != null) { + if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) { // No need to validate the actual pointer value, we don't need it! return; } @@ -5132,7 +5141,7 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air defer anon_decl.deinit(); const decl_index = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), gop.key_ptr.len), + try Type.array(anon_decl.arena(), gop.key_ptr.len, Value.zero, Type.u8, mod), try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), 0, // default alignment ); @@ -6003,10 +6012,11 @@ fn addDbgVar( air_tag: Air.Inst.Tag, name: []const u8, ) CompileError!void { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); switch (air_tag) { .dbg_var_ptr => { - if (!(try sema.typeHasRuntimeBits(operand_ty.childType()))) return; + if (!(try sema.typeHasRuntimeBits(operand_ty.childType(mod)))) return; }, .dbg_var_val => { if (!(try sema.typeHasRuntimeBits(operand_ty))) return; @@ -6238,7 +6248,7 @@ fn popErrorReturnTrace( const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); @@ -6263,7 +6273,7 @@ fn popErrorReturnTrace( // If non-error, then pop the error return trace by restoring the index. 
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); @@ -6456,16 +6466,15 @@ fn checkCallArgumentCount( switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; + const ptr_info = callee_ty.ptrInfo(mod); if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const opt_child = callee_ty.optionalChild(&buf); + const opt_child = callee_ty.optionalChild(mod); if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and - opt_child.childType().zigTypeTag(mod) == .Fn)) + opt_child.childType(mod).zigTypeTag(mod) == .Fn)) { const msg = msg: { const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{ @@ -6529,7 +6538,7 @@ fn callBuiltin( switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; + const ptr_info = callee_ty.ptrInfo(mod); if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } @@ -7929,7 +7938,7 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } else if (child_type.zigTypeTag(mod) == .Null) { return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); } - const opt_type = try Type.optional(sema.arena, child_type); + const opt_type = try Type.optional(sema.arena, child_type, mod); return sema.addType(opt_type); } @@ -7949,16 +7958,17 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const len = try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"); + const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); - const vector_type = try Type.Tag.vector.create(sema.arena, .{ - .len = @intCast(u32, len), - .elem_type = elem_type, + const vector_type = try mod.vectorType(.{ + .len = len, + .child = elem_type.ip_index, }); return sema.addType(vector_type); } @@ -8377,16 +8387,16 @@ fn analyzeOptionalPayloadPtr( const optional_ptr_ty = sema.typeOf(optional_ptr); assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer); - const opt_type = optional_ptr_ty.elemType(); + const opt_type = 
optional_ptr_ty.childType(mod); if (opt_type.zigTypeTag(mod) != .Optional) { return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)}); } - const child_type = try opt_type.optionalChildAlloc(sema.arena); + const child_type = opt_type.optionalChild(mod); const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = child_type, .mutable = !optional_ptr_ty.isConstPtr(), - .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), + .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { @@ -8401,7 +8411,7 @@ fn analyzeOptionalPayloadPtr( child_pointer, try Value.Tag.opt_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), + .container_ty = optional_ptr_ty.childType(mod), }), ); } @@ -8414,7 +8424,7 @@ fn analyzeOptionalPayloadPtr( child_pointer, try Value.Tag.opt_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), + .container_ty = optional_ptr_ty.childType(mod), }), ); } @@ -8448,14 +8458,14 @@ fn zirOptionalPayload( const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const result_ty = switch (operand_ty.zigTypeTag(mod)) { - .Optional => try operand_ty.optionalChildAlloc(sema.arena), + .Optional => operand_ty.optionalChild(mod), .Pointer => t: { - if (operand_ty.ptrSize() != .C) { + if (operand_ty.ptrSize(mod) != .C) { return sema.failWithExpectedOptionalType(block, src, operand_ty); } // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); break :t try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = try ptr_info.pointee_type.copy(sema.arena), .@"align" = ptr_info.@"align", @@ -8569,18 +8579,18 @@ fn analyzeErrUnionPayloadPtr( const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { + if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(sema.mod), }); } - const err_union_ty = operand_ty.elemType(); + const err_union_ty = operand_ty.childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(), - .@"addrspace" = operand_ty.ptrAddressSpace(), + .@"addrspace" = operand_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { @@ -8596,7 +8606,7 @@ fn analyzeErrUnionPayloadPtr( operand_pointer_ty, try Value.Tag.eu_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), + .container_ty = operand_ty.childType(mod), }), ); } @@ -8609,7 +8619,7 @@ fn analyzeErrUnionPayloadPtr( operand_pointer_ty, try Value.Tag.eu_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), + .container_ty = operand_ty.childType(mod), }), ); } @@ -8674,13 +8684,13 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { + if 
(operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(sema.mod), }); } - const result_ty = operand_ty.elemType().errorUnionSet(); + const result_ty = operand_ty.childType(mod).errorUnionSet(); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { @@ -10119,7 +10129,7 @@ fn zirSwitchCapture( const operand_is_ref = cond_tag == .switch_cond_ref; const operand_ptr = try sema.resolveInst(cond_info.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (operand_is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; if (block.inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; @@ -10131,9 +10141,9 @@ fn zirSwitchCapture( if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), .@"volatile" = operand_ptr_ty.isVolatilePtr(), - .@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant( ptr_field_ty, @@ -10150,9 +10160,9 @@ fn zirSwitchCapture( if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), .@"volatile" = operand_ptr_ty.isVolatilePtr(), - .@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); } else { @@ -10235,7 +10245,7 @@ fn zirSwitchCapture( const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = first_field.ty, .@"addrspace" = .generic, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { @@ -10311,7 +10321,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const cond_data = zir_datas[Zir.refToIndex(inst_data.operand).?].un_node; const operand_ptr = try sema.resolveInst(cond_data.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; if (operand_ty.zigTypeTag(mod) != .Union) { const msg = msg: { @@ -10448,7 +10458,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const cond_index = Zir.refToIndex(extra.data.operand).?; const raw_operand = sema.resolveInst(zir_data[cond_index].un_node.operand) catch unreachable; const target_ty = sema.typeOf(raw_operand); - break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty; + break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.childType(mod) else target_ty; }; const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union; @@ -12132,7 +12142,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A // into the final binary, and never loads the data into memory. 
// - When a Decl is destroyed, it can free the `*Module.EmbedFile`. embed_file.owner_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len), + try Type.array(anon_decl.arena(), embed_file.bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), 0, // default alignment ); @@ -12200,7 +12210,7 @@ fn zirShl( const bit_value = Value.initPayload(&bits_payload.base); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { @@ -12220,7 +12230,7 @@ fn zirShl( } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { @@ -12388,7 +12398,7 @@ fn zirShr( const bit_value = Value.initPayload(&bits_payload.base); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { @@ -12408,7 +12418,7 @@ fn zirShr( } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { @@ -12571,7 +12581,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (val.isUndef()) { return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { - const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen()); + const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { @@ -12768,8 +12778,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod); const mod = sema.mod; const ptr_addrspace = p: { - if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(); - if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(); + if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod); + if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod); break :p null; }; @@ -12883,9 +12893,9 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins const mod = sema.mod; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag(mod)) { - .Array => return operand_ty.arrayInfo(), + .Array => return operand_ty.arrayInfo(mod), .Pointer => { - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); switch (ptr_info.size) { // TODO: in the Many case here this should only work if the type // has a sentinel, and this code should compute the length based @@ -12900,7 +12910,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins }, .One => { if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { - return ptr_info.pointee_type.arrayInfo(); + return ptr_info.pointee_type.arrayInfo(mod); } }, .C => {}, @@ -12912,7 +12922,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins return .{ .elem_type = peer_ty.elemType2(mod), .sentinel = null, - .len = operand_ty.arrayLen(), + .len = operand_ty.arrayLen(mod), }; } }, @@ -13035,7 +13045,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod); - const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace() else null; + const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { @@ -14022,7 +14032,7 @@ fn intRem( ) CompileError!Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -14484,7 +14494,10 @@ fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value { fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { const mod = sema.mod; - const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1; + const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .u1_type, + }) else Type.u1; const types = try sema.arena.alloc(Type, 2); const values = try sema.arena.alloc(Value, 2); @@ -14520,7 +14533,7 @@ fn analyzeArithmetic( const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) { + if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) { .One, .Slice => {}, .Many, .C => { const air_tag: Air.Inst.Tag = switch (zir_tag) { @@ -14993,9 +15006,9 @@ fn analyzePtrArithmetic( const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr); const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset); const ptr_ty = sema.typeOf(ptr); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array) - ptr_info.pointee_type.childType() + ptr_info.pointee_type.childType(mod) else ptr_info.pointee_type; @@ -15466,7 +15479,10 @@ fn cmpSelf( if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); if (resolved_type.zigTypeTag(mod) == .Vector) { - const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool); + const result_ty = try mod.vectorType(.{ + .len = resolved_type.vectorLen(mod), + .child = .bool_type, + }); const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type); return sema.addConstant(result_ty, cmp_val); } @@ -15767,6 +15783,7 @@ fn zirBuiltinSrc( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{}); @@ -15778,7 +15795,7 @@ fn zirBuiltinSrc( const name = std.mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len - 1), + try Type.array(anon_decl.arena(), bytes.len - 1, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); @@ -15791,7 +15808,7 @@ fn zirBuiltinSrc( // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), name.len), + try Type.array(anon_decl.arena(), name.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. 
name.len + 1]), 0, // default alignment ); @@ -16024,7 +16041,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Pointer => { - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) try Value.Tag.int_u64.create(sema.arena, info.@"align") else @@ -16059,7 +16076,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Array => { - const info = ty.arrayInfo(); + const info = ty.arrayInfo(mod); const field_values = try sema.arena.alloc(Value, 3); // len: comptime_int, field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); @@ -16077,7 +16094,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Vector => { - const info = ty.arrayInfo(); + const info = ty.arrayInfo(mod); const field_values = try sema.arena.alloc(Value, 2); // len: comptime_int, field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); @@ -16095,7 +16112,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Optional => { const field_values = try sema.arena.alloc(Value, 1); // child: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena)); + field_values[0] = try Value.Tag.ty.create(sema.arena, ty.optionalChild(mod)); return sema.addConstant( type_info_ty, @@ -16141,7 +16158,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16250,7 +16267,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16338,7 +16355,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16448,7 +16465,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -16490,7 +16507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16666,14 +16683,15 @@ fn typeInfoNamespaceDecls( decl_vals: *std.ArrayList(Value), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { + const mod = sema.mod; const gop = try seen_namespaces.getOrPut(namespace); if (gop.found_existing) return; const decls = namespace.decls.keys(); for (decls) |decl_index| { - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.kind == .@"usingnamespace") { if (decl.analysis == .in_progress) continue; - try sema.mod.ensureDeclAnalyzed(decl_index); + try mod.ensureDeclAnalyzed(decl_index); const new_ns = decl.val.toType().getNamespace().?; try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); continue; @@ -16684,7 +16702,7 @@ fn typeInfoNamespaceDecls( defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16770,9 +16788,9 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi .Vector => { const elem_ty = operand.elemType2(mod); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); - return Type.Tag.vector.create(sema.arena, .{ - .len = operand.vectorLen(), - .elem_type = log2_elem_ty, + return mod.vectorType(.{ + .len = operand.vectorLen(mod), + .child = log2_elem_ty.ip_index, }); }, else => {}, @@ -17207,7 +17225,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr _ = try sema.analyzeBodyInner(&sub_block, body); const operand_ty = sema.typeOf(operand); - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); const res_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = err_union_ty.errorUnionPayload(), .@"addrspace" = ptr_info.@"addrspace", @@ -17398,6 +17416,7 @@ fn retWithErrTracing( ret_tag: Air.Inst.Tag, operand: Air.Inst.Ref, ) CompileError!Zir.Inst.Index { + const mod = sema.mod; const need_check = switch (is_non_err) { .bool_true => { _ = try block.addUnOp(ret_tag, operand); @@ -17409,7 +17428,7 @@ fn retWithErrTracing( const gpa = sema.gpa; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const return_err_fn = try sema.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; @@ -17755,7 +17774,7 @@ fn structInitEmpty( fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { const mod = sema.mod; - 
const arr_len = obj_ty.arrayLen(); + const arr_len = obj_ty.arrayLen(mod); if (arr_len != 0) { if (obj_ty.zigTypeTag(mod) == .Array) { return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len}); @@ -17763,7 +17782,7 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } - if (obj_ty.sentinel()) |sentinel| { + if (obj_ty.sentinel(mod)) |sentinel| { const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); return sema.addConstant(obj_ty, val); } else { @@ -18199,6 +18218,7 @@ fn zirArrayInit( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -18208,8 +18228,7 @@ fn zirArrayInit( assert(args.len >= 2); // array_ty + at least one element const array_ty = try sema.resolveType(block, src, args[0]); - const sentinel_val = array_ty.sentinel(); - const mod = sema.mod; + const sentinel_val = array_ty.sentinel(mod); const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null)); defer gpa.free(resolved_args); @@ -18489,14 +18508,16 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { } fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { + const mod = sema.mod; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const opt_ptr_stack_trace_ty = try Type.Tag.optional_single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); + const opt_ptr_stack_trace_ty = try Type.optional(sema.arena, ptr_stack_trace_ty, mod); if (sema.owner_func != null and sema.owner_func.?.calls_or_awaits_errorable_fn and - sema.mod.comp.bin_file.options.error_return_tracing and - sema.mod.backendSupportsFeature(.error_return_trace)) + mod.comp.bin_file.options.error_return_tracing and + mod.backendSupportsFeature(.error_return_trace)) { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); } @@ -18585,8 +18606,11 @@ fn zirUnaryMath( switch (operand_ty.zigTypeTag(mod)) { .Vector => { const scalar_ty = operand_ty.scalarType(mod); - const vec_len = operand_ty.vectorLen(); - const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty); + const vec_len = operand_ty.vectorLen(mod); + const result_ty = try mod.vectorType(.{ + .len = vec_len, + .child = scalar_ty.ip_index, + }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); @@ -18730,12 +18754,15 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const len_val = struct_val[0]; const child_val = struct_val[1]; - const len = len_val.toUnsignedInt(mod); + const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); - const ty = try Type.vector(sema.arena, len, try child_ty.copy(sema.arena)); + const ty = try mod.vectorType(.{ + .len = len, + .child = child_ty.ip_index, + }); return sema.addType(ty); }, .Float => { @@ -18872,7 +18899,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const child_ty = try child_val.toType().copy(sema.arena); - const ty = try Type.optional(sema.arena, child_ty); + const 
ty = try Type.optional(sema.arena, child_ty, mod); return sema.addType(ty); }, .ErrorUnion => { @@ -18912,7 +18939,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // TODO use reflection instead of magic numbers here // error_set: type, const name_val = struct_val[0]; - const name_str = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, sema.mod); + const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, sema.mod); const kv = try mod.getErrorValue(name_str); const gop = names.getOrPutAssumeCapacity(kv.key); @@ -19038,7 +19065,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const value_val = field_struct_val[1]; const field_name = try name_val.toAllocatedBytes( - Type.initTag(.const_slice_u8), + Type.const_slice_u8, new_decl_arena_allocator, sema.mod, ); @@ -19215,7 +19242,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const alignment_val = field_struct_val[2]; const field_name = try name_val.toAllocatedBytes( - Type.initTag(.const_slice_u8), + Type.const_slice_u8, new_decl_arena_allocator, sema.mod, ); @@ -19482,7 +19509,7 @@ fn reifyStruct( } const field_name = try name_val.toAllocatedBytes( - Type.initTag(.const_slice_u8), + Type.const_slice_u8, new_decl_arena_allocator, mod, ); @@ -19626,7 +19653,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst try sema.checkPtrOperand(block, ptr_src, ptr_ty); - var ptr_info = ptr_ty.ptrInfo().data; + var ptr_info = ptr_ty.ptrInfo(mod); const src_addrspace = ptr_info.@"addrspace"; if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) { const msg = msg: { @@ -19641,7 +19668,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst ptr_info.@"addrspace" = dest_addrspace; const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional) - try Type.optional(sema.arena, dest_ptr_ty) + try Type.optional(sema.arena, dest_ptr_ty, mod) else dest_ptr_ty; @@ -19731,6 +19758,7 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) } fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, ty_src, inst_data.operand); @@ -19738,10 +19766,10 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try ty.nameAllocArena(anon_decl.arena(), sema.mod); + const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), + try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -19842,7 +19870,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const elem_ty = ptr_ty.elemType2(mod); const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { const msg = msg: { const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -19987,8 +20015,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air try sema.checkPtrType(block, dest_ty_src, dest_ty); try sema.checkPtrOperand(block, operand_src, operand_ty); - const operand_info = operand_ty.ptrInfo().data; - const dest_info = dest_ty.ptrInfo().data; + const operand_info = operand_ty.ptrInfo(mod); + const dest_info = dest_ty.ptrInfo(mod); if (!operand_info.mutable and dest_info.mutable) { const msg = msg: { const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{}); @@ -20042,12 +20070,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: { // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result if (dest_ty.zigTypeTag(mod) == .Optional) { - var buf: Type.Payload.ElemType = undefined; - var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data; + var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod); dest_ptr_info.@"align" = operand_align; - break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info)); + break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info), mod); } else { - var dest_ptr_info = dest_ty.ptrInfo().data; + var dest_ptr_info = dest_ty.ptrInfo(mod); dest_ptr_info.@"align" = operand_align; break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info); } @@ -20110,6 +20137,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -20117,7 +20145,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData const operand_ty = sema.typeOf(operand); try sema.checkPtrOperand(block, operand_src, operand_ty); - var ptr_info = operand_ty.ptrInfo().data; + var ptr_info = operand_ty.ptrInfo(mod); ptr_info.mutable = true; const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); @@ -20130,6 +20158,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData } fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -20137,7 +20166,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD const operand_ty = sema.typeOf(operand); try sema.checkPtrOperand(block, operand_src, operand_ty); - var ptr_info = operand_ty.ptrInfo().data; + var ptr_info = operand_ty.ptrInfo(mod); ptr_info.@"volatile" = false; 
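    // The unwrap-adjust-rewrap idiom used throughout this patch: ptrInfo(mod)
    // now reads the pointer description out of the InternPool by value (the
    // old API returned a tag payload, hence the removed `.data` accesses), so
    // the local copy can be mutated and handed back to Type.ptr to build the
    // adjusted pointer type. A minimal sketch, assuming `mod: *Module` is in
    // scope as in the surrounding hunks:
    //
    //     var info = some_ptr_ty.ptrInfo(mod); // value copy, no `.data`
    //     info.mutable = false;                // e.g. drop mutability
    //     const adjusted = try Type.ptr(sema.arena, sema.mod, info);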
const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); @@ -20163,7 +20192,10 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); const is_vector = operand_ty.zigTypeTag(mod) == .Vector; const dest_ty = if (is_vector) - try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty) + try mod.vectorType(.{ + .len = operand_ty.vectorLen(mod), + .child = dest_scalar_ty.ip_index, + }) else dest_scalar_ty; @@ -20218,7 +20250,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); } var elem_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, operand_ty.vectorLen()); + const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod)); for (elems, 0..) |*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod); @@ -20245,7 +20277,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.checkPtrOperand(block, ptr_src, ptr_ty); - var ptr_info = ptr_ty.ptrInfo().data; + var ptr_info = ptr_ty.ptrInfo(mod); ptr_info.@"align" = dest_align; var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); if (ptr_ty.zigTypeTag(mod) == .Optional) { @@ -20314,8 +20346,11 @@ fn zirBitCount( const result_scalar_ty = try mod.smallestUnsignedInt(bits); switch (operand_ty.zigTypeTag(mod)) { .Vector => { - const vec_len = operand_ty.vectorLen(); - const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty); + const vec_len = operand_ty.vectorLen(mod); + const result_ty = try mod.vectorType(.{ + .len = vec_len, + .child = result_scalar_ty.ip_index, + }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); @@ -20388,7 +20423,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (val.isUndef()) return sema.addConstUndef(operand_ty); - const vec_len = operand_ty.vectorLen(); + const vec_len = operand_ty.vectorLen(mod); var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { @@ -20437,7 +20472,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (val.isUndef()) return sema.addConstUndef(operand_ty); - const vec_len = operand_ty.vectorLen(); + const vec_len = operand_ty.vectorLen(mod); var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { @@ -20546,7 +20581,7 @@ fn checkInvalidPtrArithmetic( ) CompileError!void { const mod = sema.mod; switch (try ty.zigTypeTagOrPoison(mod)) { - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .One, .Slice => return, .Many, .C => return sema.fail( block, @@ -20676,7 +20711,7 @@ fn checkNumericType( const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, - .Vector => switch (ty.childType().zigTypeTag(mod)) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, @@ -20726,7 +20761,7 @@ fn checkAtomicPtrOperand( const ptr_ty = sema.typeOf(ptr); const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { - .Pointer => ptr_ty.ptrInfo().data, + .Pointer => ptr_ty.ptrInfo(mod), else => { const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); @@ -20797,7 +20832,7 @@ fn checkIntOrVector( switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int => return operand_ty, .Vector => { - const elem_ty = operand_ty.childType(); + const elem_ty = operand_ty.childType(mod); switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ @@ -20821,7 +20856,7 @@ fn checkIntOrVectorAllowComptime( switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return operand_ty, .Vector => { - const elem_ty = operand_ty.childType(); + const elem_ty = operand_ty.childType(mod); switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ @@ -20870,7 +20905,7 @@ fn checkSimdBinOp( const rhs_ty = sema.typeOf(uncasted_rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen() else null; + var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null; const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); @@ -20912,8 +20947,8 @@ fn checkVectorizableBinaryOperands( }; if (lhs_is_vector and rhs_is_vector) { - const lhs_len = lhs_ty.arrayLen(); - const rhs_len = rhs_ty.arrayLen(); + const lhs_len = lhs_ty.arrayLen(mod); + const rhs_len = rhs_ty.arrayLen(mod); if (lhs_len != rhs_len) { const msg = msg: { const msg = try sema.errMsg(block, src, "vector length mismatch", .{}); @@ -20966,7 +21001,7 @@ fn resolveExportOptions( const name_operand = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); - const name_ty = Type.initTag(.const_slice_u8); + const name_ty = Type.const_slice_u8; const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod); const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); @@ -20975,7 +21010,7 @@ fn resolveExportOptions( const section_operand = try sema.fieldVal(block, src, options, "section", section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); - const 
section_ty = Type.initTag(.const_slice_u8); + const section_ty = Type.const_slice_u8; const section = if (section_opt_val.optionalValue(mod)) |section_val| try section_val.toAllocatedBytes(section_ty, sema.arena, mod) else @@ -21087,7 +21122,7 @@ fn zirCmpxchg( return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{}); } - const result_ty = try Type.optional(sema.arena, elem_ty); + const result_ty = try Type.optional(sema.arena, elem_ty, mod); // special case zero bit types if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) { @@ -21133,6 +21168,7 @@ fn zirCmpxchg( } fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -21141,9 +21177,9 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const scalar = try sema.resolveInst(extra.rhs); const scalar_ty = sema.typeOf(scalar); try sema.checkVectorElemType(block, scalar_src, scalar_ty); - const vector_ty = try Type.Tag.vector.create(sema.arena, .{ + const vector_ty = try mod.vectorType(.{ .len = len, - .elem_type = scalar_ty, + .child = scalar_ty.ip_index, }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty); @@ -21172,7 +21208,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)}); } - const scalar_ty = operand_ty.childType(); + const scalar_ty = operand_ty.childType(mod); // Type-check depending on operation. switch (operation) { @@ -21190,7 +21226,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. }, } - const vec_len = operand_ty.vectorLen(); + const vec_len = operand_ty.vectorLen(mod); if (vec_len == 0) { // TODO re-evaluate if we should introduce a "neutral value" for some operations, // e.g. zero for add and one for mul. 
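The vector-type constructions rewritten in the surrounding hunks all follow one shape: the old arena-allocated `Type.Tag.vector.create(sema.arena, .{ .len = ..., .elem_type = ... })` becomes `mod.vectorType(...)`, which goes through the InternPool. The element type travels as an `InternPool.Index` (either a well-known index such as `.i32_type`, or `ty.ip_index` for an arbitrary type), and the length field is a `u32`, which is why `@intCast(u32, ...)` casts appear at the call sites. A minimal sketch of the new call shape, assuming a `mod: *Module` in scope and that `vectorType` returns the canonical interned type:

    // Well-known element type: use the named InternPool index directly.
    const mask_ty = try mod.vectorType(.{
        .len = @intCast(u32, mask_len), // lengths are u32 in the interned key
        .child = .i32_type,
    });

    // Arbitrary element type: route through its ip_index.
    const vec_ty = try mod.vectorType(.{
        .len = vec_len,
        .child = elem_ty.ip_index,
    });

Because the result is interned, constructing the same vector type twice should yield the same index, so no arena copy or structural equality check is needed at the call site.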
@@ -21243,12 +21279,12 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air var mask_ty = sema.typeOf(mask); const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) { - .Array, .Vector => sema.typeOf(mask).arrayLen(), + .Array, .Vector => sema.typeOf(mask).arrayLen(mod), else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}), }; - mask_ty = try Type.Tag.vector.create(sema.arena, .{ - .len = mask_len, - .elem_type = Type.i32, + mask_ty = try mod.vectorType(.{ + .len = @intCast(u32, mask_len), + .child = .i32_type, }); mask = try sema.coerce(block, mask_ty, mask, mask_src); const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known"); @@ -21272,13 +21308,13 @@ fn analyzeShuffle( var a = a_arg; var b = b_arg; - const res_ty = try Type.Tag.vector.create(sema.arena, .{ + const res_ty = try mod.vectorType(.{ .len = mask_len, - .elem_type = elem_ty, + .child = elem_ty.ip_index, }); var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) { - .Array, .Vector => sema.typeOf(a).arrayLen(), + .Array, .Vector => sema.typeOf(a).arrayLen(mod), .Undefined => null, else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{ elem_ty.fmt(sema.mod), @@ -21286,7 +21322,7 @@ fn analyzeShuffle( }), }; var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) { - .Array, .Vector => sema.typeOf(b).arrayLen(), + .Array, .Vector => sema.typeOf(b).arrayLen(mod), .Undefined => null, else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{ elem_ty.fmt(sema.mod), @@ -21296,16 +21332,16 @@ fn analyzeShuffle( if (maybe_a_len == null and maybe_b_len == null) { return sema.addConstUndef(res_ty); } - const a_len = maybe_a_len orelse maybe_b_len.?; - const b_len = maybe_b_len orelse a_len; + const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?); + const b_len = @intCast(u32, maybe_b_len orelse a_len); - const a_ty = try Type.Tag.vector.create(sema.arena, .{ + const a_ty = try mod.vectorType(.{ .len = a_len, - .elem_type = elem_ty, + .child = elem_ty.ip_index, }); - const b_ty = try Type.Tag.vector.create(sema.arena, .{ + const b_ty = try mod.vectorType(.{ .len = b_len, - .elem_type = elem_ty, + .child = elem_ty.ip_index, }); if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src); @@ -21437,15 +21473,21 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const pred_ty = sema.typeOf(pred_uncoerced); const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) { - .Vector, .Array => pred_ty.arrayLen(), + .Vector, .Array => pred_ty.arrayLen(mod), else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}), }; - const vec_len = try sema.usizeCast(block, pred_src, vec_len_u64); + const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64)); - const bool_vec_ty = try Type.vector(sema.arena, vec_len, Type.bool); + const bool_vec_ty = try mod.vectorType(.{ + .len = vec_len, + .child = .bool_type, + }); const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src); - const vec_ty = try Type.vector(sema.arena, vec_len, elem_ty); + const vec_ty = try mod.vectorType(.{ + .len = vec_len, + .child = elem_ty.ip_index, + }); const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src); const b = try 
sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src); @@ -21854,7 +21896,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr } try sema.checkPtrOperand(block, ptr_src, field_ptr_ty); - const field_ptr_ty_info = field_ptr_ty.ptrInfo().data; + const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod); var ptr_ty_data: Type.Payload.Pointer.Data = .{ .pointee_type = parent_ty.structFieldType(field_index), @@ -22052,8 +22094,8 @@ fn analyzeMinMax( } const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: { - const elem_ty = orig_ty.childType(); - const len = orig_ty.vectorLen(); + const elem_ty = orig_ty.childType(mod); + const len = orig_ty.vectorLen(mod); if (len == 0) break :blk orig_ty; if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats @@ -22068,7 +22110,10 @@ fn analyzeMinMax( } const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max); - break :blk try Type.vector(sema.arena, len, refined_elem_ty); + break :blk try mod.vectorType(.{ + .len = len, + .child = refined_elem_ty.ip_index, + }); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats if (val.isUndef()) break :blk orig_ty; // can't refine undef @@ -22129,8 +22174,8 @@ fn analyzeMinMax( if (known_undef) break :refine; // can't refine undef const unrefined_ty = sema.typeOf(cur_minmax.?); const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector; - const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty; - const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty; + const comptime_elem_ty = if (is_vector) comptime_ty.childType(mod) else comptime_ty; + const unrefined_elem_ty = if (is_vector) unrefined_ty.childType(mod) else unrefined_ty; if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats @@ -22150,7 +22195,10 @@ fn analyzeMinMax( const final_elem_ty = try mod.intFittingRange(min_val, max_val); const final_ty = if (is_vector) - try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty) + try mod.vectorType(.{ + .len = unrefined_ty.vectorLen(mod), + .child = final_elem_ty.ip_index, + }) else final_elem_ty; @@ -22165,7 +22213,7 @@ fn analyzeMinMax( fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref { const mod = sema.mod; - const info = sema.typeOf(ptr).ptrInfo().data; + const info = sema.typeOf(ptr).ptrInfo(mod); if (info.size == .One) { // Already an array pointer. 
return ptr; @@ -22659,7 +22707,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; - const ty = Type.initTag(.const_slice_u8); + const ty = Type.const_slice_u8; const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known"); if (val.isGenericPoison()) { break :blk FuncLinkSection{ .generic = {} }; @@ -22943,7 +22991,7 @@ fn resolveExternOptions( const name_ref = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known"); - const name = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); + const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src); const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known"); @@ -22957,7 +23005,7 @@ fn resolveExternOptions( const library_name = if (!library_name_val.isNull(mod)) blk: { const payload = library_name_val.castTag(.opt_payload).?.data; - const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); + const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); if (library_name.len == 0) { return sema.fail(block, library_src, "library name cannot be empty", .{}); } @@ -22994,7 +23042,7 @@ fn zirBuiltinExtern( if (!ty.isPtrAtRuntime(mod)) { return sema.fail(block, ty_src, "expected (optional) pointer", .{}); } - if (!try sema.validateExternType(ty.childType(), .other)) { + if (!try sema.validateExternType(ty.childType(mod), .other)) { const msg = msg: { const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); @@ -23014,7 +23062,7 @@ fn zirBuiltinExtern( }; if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) { - ty = try Type.optional(sema.arena, ty); + ty = try Type.optional(sema.arena, ty, mod); } // TODO check duplicate extern @@ -23194,7 +23242,7 @@ fn validateRunTimeType( => return false, .Pointer => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Opaque => return true, .Fn => return elem_ty.isFnOrHasRuntimeBits(mod), @@ -23204,11 +23252,10 @@ fn validateRunTimeType( .Opaque => return is_extern, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); return sema.validateRunTimeType(child_ty, is_extern); }, - .Array, .Vector => ty = ty.elemType(), + .Array, .Vector => ty = ty.childType(mod), .ErrorUnion => ty = ty.errorUnionPayload(), @@ -23277,7 +23324,7 @@ fn explainWhyTypeIsComptimeInner( }, .Array, .Vector => { - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set); }, .Pointer => { const elem_ty = ty.elemType2(mod); @@ -23295,12 +23342,11 @@ fn explainWhyTypeIsComptimeInner( } return; } - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - try 
sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(&buf), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set); }, .ErrorUnion => { try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(), type_set); @@ -23451,7 +23497,7 @@ fn explainWhyTypeIsNotExtern( if (ty.isSlice(mod)) { try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); } else { - const pointee_ty = ty.childType(); + const pointee_ty = ty.childType(mod); try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsComptime(msg, src_loc, pointee_ty); } @@ -23698,7 +23744,7 @@ fn panicWithMsg( .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic }); const null_stack_trace = try sema.addConstant( - try Type.optional(arena, ptr_stack_trace_ty), + try Type.optional(arena, ptr_stack_trace_ty, mod), Value.null, ); const args: [3]Air.Inst.Ref = .{ msg_inst, null_stack_trace, .null_value }; @@ -23927,7 +23973,7 @@ fn fieldVal( const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) - object_ty.childType() + object_ty.childType(mod) else object_ty; @@ -23936,12 +23982,12 @@ fn fieldVal( if (mem.eql(u8, field_name, "len")) { return sema.addConstant( Type.usize, - try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()), + try Value.Tag.int_u64.create(arena, inner_ty.arrayLen(mod)), ); } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { - const ptr_info = object_ty.ptrInfo().data; + const ptr_info = object_ty.ptrInfo(mod); const result_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = ptr_info.pointee_type.childType(), + .pointee_type = ptr_info.pointee_type.childType(mod), .sentinel = ptr_info.sentinel, .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", @@ -23964,7 +24010,7 @@ fn fieldVal( } }, .Pointer => { - const ptr_info = inner_ty.ptrInfo().data; + const ptr_info = inner_ty.ptrInfo(mod); if (ptr_info.size == .Slice) { if (mem.eql(u8, field_name, "ptr")) { const slice = if (is_pointer_to) @@ -24107,7 +24153,7 @@ fn fieldPtr( const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { - .Pointer => object_ptr_ty.elemType(), + .Pointer => object_ptr_ty.childType(mod), else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}), }; @@ -24117,7 +24163,7 @@ fn fieldPtr( const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) - object_ty.childType() + object_ty.childType(mod) else object_ty; @@ -24128,7 +24174,7 @@ fn fieldPtr( defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( Type.usize, - try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()), + try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen(mod)), 0, // default alignment )); } else { @@ -24154,9 +24200,9 @@ fn fieldPtr( const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = slice_ptr_ty, - .mutable = attr_ptr_ty.ptrIsMutable(), + .mutable = attr_ptr_ty.ptrIsMutable(mod), .@"volatile" = attr_ptr_ty.isVolatilePtr(), - .@"addrspace" = attr_ptr_ty.ptrAddressSpace(), + .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { @@ 
-24175,9 +24221,9 @@ fn fieldPtr( } else if (mem.eql(u8, field_name, "len")) { const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = Type.usize, - .mutable = attr_ptr_ty.ptrIsMutable(), + .mutable = attr_ptr_ty.ptrIsMutable(mod), .@"volatile" = attr_ptr_ty.isVolatilePtr(), - .@"addrspace" = attr_ptr_ty.ptrAddressSpace(), + .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { @@ -24329,14 +24375,14 @@ fn fieldCallBind( const mod = sema.mod; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); - const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C)) - raw_ptr_ty.childType() + const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) + raw_ptr_ty.childType(mod) else return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)}); // Optionally dereference a second pointer to get the concrete type. - const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize() == .One; - const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty; + const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One; + const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty; const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; const object_ptr = if (is_double_ptr) try sema.analyzeLoad(block, src, raw_ptr, src) @@ -24404,9 +24450,9 @@ fn fieldCallBind( // zig fmt: off if (first_param_type.isGenericPoison() or ( first_param_type.zigTypeTag(mod) == .Pointer and - (first_param_type.ptrSize() == .One or - first_param_type.ptrSize() == .C) and - first_param_type.childType().eql(concrete_ty, sema.mod))) + (first_param_type.ptrSize(mod) == .One or + first_param_type.ptrSize(mod) == .C) and + first_param_type.childType(mod).eql(concrete_ty, sema.mod))) { // zig fmt: on // Note that if the param type is generic poison, we know that it must @@ -24425,8 +24471,7 @@ fn fieldCallBind( .arg0_inst = deref, } }; } else if (first_param_type.zigTypeTag(mod) == .Optional) { - var opt_buf: Type.Payload.ElemType = undefined; - const child = first_param_type.optionalChild(&opt_buf); + const child = first_param_type.optionalChild(mod); if (child.eql(concrete_ty, sema.mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ @@ -24434,8 +24479,8 @@ fn fieldCallBind( .arg0_inst = deref, } }; } else if (child.zigTypeTag(mod) == .Pointer and - child.ptrSize() == .One and - child.childType().eql(concrete_ty, sema.mod)) + child.ptrSize(mod) == .One and + child.childType(mod).eql(concrete_ty, sema.mod)) { return .{ .method = .{ .func_inst = decl_val, @@ -24482,15 +24527,15 @@ fn finishFieldCallBind( field_index: u32, object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { + const mod = sema.mod; const arena = sema.arena; const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ .pointee_type = field_ty, - .mutable = ptr_ty.ptrIsMutable(), - .@"addrspace" = ptr_ty.ptrAddressSpace(), + .mutable = ptr_ty.ptrIsMutable(mod), + .@"addrspace" = ptr_ty.ptrAddressSpace(mod), }); - const mod = sema.mod; - const container_ty = ptr_ty.childType(); + const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { if (container_ty.structFieldValueComptime(mod, field_index)) 
|default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; @@ -24618,7 +24663,7 @@ fn structFieldPtrByIndex( const struct_obj = struct_ty.castTag(.@"struct").?.data; const field = struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); - const struct_ptr_ty_info = struct_ptr_ty.ptrInfo().data; + const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod); var ptr_ty_data: Type.Payload.Pointer.Data = .{ .pointee_type = field.ty, @@ -24696,7 +24741,7 @@ fn structFieldPtrByIndex( ptr_field_ty, try Value.Tag.field_ptr.create(sema.arena, .{ .container_ptr = struct_ptr_val, - .container_ty = struct_ptr_ty.childType(), + .container_ty = struct_ptr_ty.childType(mod), .field_index = field_index, }), ); @@ -24846,9 +24891,9 @@ fn unionFieldPtr( const field = union_obj.fields.values()[field_index]; const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ .pointee_type = field.ty, - .mutable = union_ptr_ty.ptrIsMutable(), + .mutable = union_ptr_ty.ptrIsMutable(mod), .@"volatile" = union_ptr_ty.isVolatilePtr(), - .@"addrspace" = union_ptr_ty.ptrAddressSpace(), + .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod), }); const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); @@ -25009,7 +25054,7 @@ fn elemPtr( const indexable_ptr_ty = sema.typeOf(indexable_ptr); const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { - .Pointer => indexable_ptr_ty.elemType(), + .Pointer => indexable_ptr_ty.childType(mod), else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), }; try checkIndexable(sema, block, src, indexable_ty); @@ -25046,7 +25091,7 @@ fn elemPtrOneLayerOnly( try checkIndexable(sema, block, src, indexable_ty); - switch (indexable_ty.ptrSize()) { + switch (indexable_ty.ptrSize(mod)) { .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable); @@ -25065,7 +25110,7 @@ fn elemPtrOneLayerOnly( return block.addPtrElemPtr(indexable, elem_index, result_ty); }, .One => { - assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable + assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety); }, } @@ -25091,7 +25136,7 @@ fn elemVal( const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src); switch (indexable_ty.zigTypeTag(mod)) { - .Pointer => switch (indexable_ty.ptrSize()) { + .Pointer => switch (indexable_ty.ptrSize(mod)) { .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable); @@ -25112,7 +25157,7 @@ fn elemVal( return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .One => { - assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable + assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety); return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src); }, @@ -25171,7 +25216,7 @@ fn tupleFieldPtr( ) CompileError!Air.Inst.Ref { const mod = 
sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); - const tuple_ty = tuple_ptr_ty.childType(); + const tuple_ty = tuple_ptr_ty.childType(mod); _ = try sema.resolveTypeFields(tuple_ty); const field_count = tuple_ty.structFieldCount(); @@ -25188,9 +25233,9 @@ fn tupleFieldPtr( const field_ty = tuple_ty.structFieldType(field_index); const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, - .mutable = tuple_ptr_ty.ptrIsMutable(), + .mutable = tuple_ptr_ty.ptrIsMutable(mod), .@"volatile" = tuple_ptr_ty.isVolatilePtr(), - .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(), + .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { @@ -25271,10 +25316,10 @@ fn elemValArray( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const array_ty = sema.typeOf(array); - const array_sent = array_ty.sentinel(); - const array_len = array_ty.arrayLen(); + const array_sent = array_ty.sentinel(mod); + const array_len = array_ty.arrayLen(mod); const array_len_s = array_len + @boolToInt(array_sent != null); - const elem_ty = array_ty.childType(); + const elem_ty = array_ty.childType(mod); if (array_len_s == 0) { return sema.fail(block, array_src, "indexing into empty array is not allowed", .{}); @@ -25335,9 +25380,9 @@ fn elemPtrArray( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const array_ptr_ty = sema.typeOf(array_ptr); - const array_ty = array_ptr_ty.childType(); - const array_sent = array_ty.sentinel() != null; - const array_len = array_ty.arrayLen(); + const array_ty = array_ptr_ty.childType(mod); + const array_sent = array_ty.sentinel(mod) != null; + const array_len = array_ty.arrayLen(mod); const array_len_s = array_len + @boolToInt(array_sent); if (array_len_s == 0) { @@ -25396,7 +25441,7 @@ fn elemValSlice( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const slice_ty = sema.typeOf(slice); - const slice_sent = slice_ty.sentinel() != null; + const slice_sent = slice_ty.sentinel(mod) != null; const elem_ty = slice_ty.elemType2(mod); var runtime_src = slice_src; @@ -25453,7 +25498,7 @@ fn elemPtrSlice( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const slice_ty = sema.typeOf(slice); - const slice_sent = slice_ty.sentinel() != null; + const slice_sent = slice_ty.sentinel(mod) != null; const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice); // The index must not be undefined since it can be out of bounds. @@ -25614,7 +25659,7 @@ fn coerceExtra( } // T to ?T - const child_type = try dest_ty.optionalChildAlloc(sema.arena); + const child_type = dest_ty.optionalChild(mod); const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { error.NotCoercible => { if (in_memory_result == .no_match) { @@ -25628,7 +25673,7 @@ fn coerceExtra( return try sema.wrapOptional(block, dest_ty, intermediate, inst_src); }, .Pointer => pointer: { - const dest_info = dest_ty.ptrInfo().data; + const dest_info = dest_ty.ptrInfo(mod); // Function body to function pointer. 
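// [Editor's note, illustration only -- not part of this patch.] The T-to-?T
// hunk above is the template for every optionalChild change in this file: the
// old API needed either an arena allocation (optionalChildAlloc) or a
// caller-provided Type.Payload.ElemType buffer to materialize the child type,
// while the interned form is a plain lookup:
//     // before: var buf: Type.Payload.ElemType = undefined;
//     //         const child = opt_ty.optionalChild(&buf);
//     // after:  const child = opt_ty.optionalChild(mod);
// No allocation, and the result no longer borrows stack memory from `buf`.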
if (inst_ty.zigTypeTag(mod) == .Fn) { @@ -25643,11 +25688,11 @@ fn coerceExtra( if (dest_info.size != .One) break :single_item; if (!inst_ty.isSinglePointer(mod)) break :single_item; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; - const ptr_elem_ty = inst_ty.childType(); + const ptr_elem_ty = inst_ty.childType(mod); const array_ty = dest_info.pointee_type; if (array_ty.zigTypeTag(mod) != .Array) break :single_item; - const array_elem_ty = array_ty.childType(); - if (array_ty.arrayLen() != 1) break :single_item; + const array_elem_ty = array_ty.childType(mod); + if (array_ty.arrayLen(mod) != 1) break :single_item; const dest_is_mut = dest_info.mutable; switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) { .ok => {}, @@ -25660,9 +25705,9 @@ fn coerceExtra( src_array_ptr: { if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; - const array_ty = inst_ty.childType(); + const array_ty = inst_ty.childType(mod); if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr; - const array_elem_type = array_ty.childType(); + const array_elem_type = array_ty.childType(mod); const dest_is_mut = dest_info.mutable; const dst_elem_type = dest_info.pointee_type; @@ -25680,7 +25725,7 @@ fn coerceExtra( } if (dest_info.sentinel) |dest_sent| { - if (array_ty.sentinel()) |inst_sent| { + if (array_ty.sentinel(mod)) |inst_sent| { if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) { in_memory_result = .{ .ptr_sentinel = .{ .actual = inst_sent, @@ -25721,7 +25766,7 @@ fn coerceExtra( if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr; // In this case we must add a safety check because the C pointer // could be null. 
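// [Editor's note, illustration only -- not part of this patch.] The safety
// check mentioned above exists because a `[*c]T` may legally hold null while
// the destination pointer type may not, e.g. (hypothetical user code):
//     extern fn get() [*c]u8;
//     const p: [*]u8 = get(); // coercion must panic at runtime on null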
- const src_elem_ty = inst_ty.childType(); + const src_elem_ty = inst_ty.childType(mod); const dest_is_mut = dest_info.mutable; const dst_elem_type = dest_info.pointee_type; switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) { @@ -25784,7 +25829,7 @@ fn coerceExtra( }, .Pointer => p: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p; - const inst_info = inst_ty.ptrInfo().data; + const inst_info = inst_ty.ptrInfo(mod); switch (try sema.coerceInMemoryAllowed( block, dest_info.pointee_type, @@ -25814,7 +25859,7 @@ fn coerceExtra( .Union => { // pointer to anonymous struct to pointer to union if (inst_ty.isSinglePointer(mod) and - inst_ty.childType().isAnonStruct() and + inst_ty.childType(mod).isAnonStruct() and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src); @@ -25823,7 +25868,7 @@ fn coerceExtra( .Struct => { // pointer to anonymous struct to pointer to struct if (inst_ty.isSinglePointer(mod) and - inst_ty.childType().isAnonStruct() and + inst_ty.childType(mod).isAnonStruct() and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) { @@ -25835,7 +25880,7 @@ fn coerceExtra( .Array => { // pointer to tuple to pointer to array if (inst_ty.isSinglePointer(mod) and - inst_ty.childType().isTuple() and + inst_ty.childType(mod).isTuple() and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src); @@ -25854,7 +25899,7 @@ fn coerceExtra( } if (!inst_ty.isSinglePointer(mod)) break :to_slice; - const inst_child_ty = inst_ty.childType(); + const inst_child_ty = inst_ty.childType(mod); if (!inst_child_ty.isTuple()) break :to_slice; // empty tuple to zero-length slice @@ -25887,7 +25932,7 @@ fn coerceExtra( .Many => p: { if (!inst_ty.isSlice(mod)) break :p; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p; - const inst_info = inst_ty.ptrInfo().data; + const inst_info = inst_ty.ptrInfo(mod); switch (try sema.coerceInMemoryAllowed( block, @@ -26196,9 +26241,8 @@ fn coerceExtra( } // ?T to T - var buf: Type.Payload.ElemType = undefined; if (inst_ty.zigTypeTag(mod) == .Optional and - (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) + (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{}); try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{}); @@ -26399,10 +26443,8 @@ const InMemoryCoercionResult = union(enum) { cur = pair.child; }, .optional_shape => |pair| { - var buf_actual: Type.Payload.ElemType = undefined; - var buf_wanted: Type.Payload.ElemType = undefined; try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - pair.actual.optionalChild(&buf_actual).fmt(sema.mod), pair.wanted.optionalChild(&buf_wanted).fmt(sema.mod), + pair.actual.optionalChild(mod).fmt(sema.mod), pair.wanted.optionalChild(mod).fmt(sema.mod), }); break; }, @@ -26640,10 +26682,8 @@ fn coerceInMemoryAllowed( } // Pointers / Pointer-like Optionals - var dest_buf: Type.Payload.ElemType = undefined; - 
var src_buf: Type.Payload.ElemType = undefined; - const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty, &dest_buf); - const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty, &src_buf); + const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty); + const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty); if (maybe_dest_ptr_ty) |dest_ptr_ty| { if (maybe_src_ptr_ty) |src_ptr_ty| { return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src); @@ -26685,8 +26725,8 @@ fn coerceInMemoryAllowed( // Arrays if (dest_tag == .Array and src_tag == .Array) { - const dest_info = dest_ty.arrayInfo(); - const src_info = src_ty.arrayInfo(); + const dest_info = dest_ty.arrayInfo(mod); + const src_info = src_ty.arrayInfo(mod); if (dest_info.len != src_info.len) { return InMemoryCoercionResult{ .array_len = .{ .actual = src_info.len, @@ -26717,8 +26757,8 @@ fn coerceInMemoryAllowed( // Vectors if (dest_tag == .Vector and src_tag == .Vector) { - const dest_len = dest_ty.vectorLen(); - const src_len = src_ty.vectorLen(); + const dest_len = dest_ty.vectorLen(mod); + const src_len = src_ty.vectorLen(mod); if (dest_len != src_len) { return InMemoryCoercionResult{ .vector_len = .{ .actual = src_len, @@ -26748,8 +26788,8 @@ fn coerceInMemoryAllowed( .wanted = dest_ty, } }; } - const dest_child_type = dest_ty.optionalChild(&dest_buf); - const src_child_type = src_ty.optionalChild(&src_buf); + const dest_child_type = dest_ty.optionalChild(mod); + const src_child_type = src_ty.optionalChild(mod); const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src); if (child != .ok) { @@ -27019,8 +27059,8 @@ fn coerceInMemoryAllowedPtrs( src_src: LazySrcLoc, ) !InMemoryCoercionResult { const mod = sema.mod; - const dest_info = dest_ptr_ty.ptrInfo().data; - const src_info = src_ptr_ty.ptrInfo().data; + const dest_info = dest_ptr_ty.ptrInfo(mod); + const src_info = src_ptr_ty.ptrInfo(mod); const ok_ptr_size = src_info.size == dest_info.size or src_info.size == .C or dest_info.size == .C; @@ -27206,11 +27246,12 @@ fn storePtr2( operand_src: LazySrcLoc, air_tag: Air.Inst.Tag, ) CompileError!void { + const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); if (ptr_ty.isConstPtr()) return sema.fail(block, ptr_src, "cannot assign to constant", .{}); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); // To generate better code for tuples, we detect a tuple operand here, and // analyze field loads and stores directly. This avoids an extra allocation + memcpy @@ -27221,7 +27262,6 @@ fn storePtr2( // this code does not handle tuple-to-struct coercion which requires dealing with missing // fields. 
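// [Editor's note, illustration only -- not part of this patch.] The per-field
// lowering described above means a store such as (hypothetical user code):
//     const t = .{ @as(u8, 1), @as(u8, 2) };
//     array_ptr.* = t; // tuple coerced into a [2]u8
// is analyzed as one element-pointer plus one store per field, rather than
// materializing the whole tuple in a temporary and memcpy'ing it over.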
const operand_ty = sema.typeOf(uncasted_operand); - const mod = sema.mod; if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) { const field_count = operand_ty.structFieldCount(); var i: u32 = 0; @@ -27247,7 +27287,7 @@ fn storePtr2( // as well as working around an LLVM bug: // https://github.com/ziglang/zig/issues/11154 if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| { - const vector_ty = sema.typeOf(vector_ptr).childType(); + const vector_ty = sema.typeOf(vector_ptr).childType(mod); const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, @@ -27288,7 +27328,7 @@ fn storePtr2( try sema.requireRuntimeBlock(block, src, runtime_src); try sema.queueFullTypeResolution(elem_ty); - if (ptr_ty.ptrInfo().data.vector_index == .runtime) { + if (ptr_ty.ptrInfo(mod).vector_index == .runtime) { const ptr_inst = Air.refToIndex(ptr).?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[ptr_inst] == .ptr_elem_ptr) { @@ -27322,8 +27362,8 @@ fn storePtr2( /// pointer. Only if the final element type matches the vector element type, and the /// lengths match. fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { - const array_ty = sema.typeOf(ptr).childType(); const mod = sema.mod; + const array_ty = sema.typeOf(ptr).childType(mod); if (array_ty.zigTypeTag(mod) != .Array) return null; var ptr_inst = Air.refToIndex(ptr) orelse return null; const air_datas = sema.air_instructions.items(.data); @@ -27332,7 +27372,6 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const prev_ptr = air_datas[ptr_inst].ty_op.operand; const prev_ptr_ty = sema.typeOf(prev_ptr); const prev_ptr_child_ty = switch (prev_ptr_ty.tag()) { - .single_mut_pointer => prev_ptr_ty.castTag(.single_mut_pointer).?.data, .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type, else => return null, }; @@ -27342,9 +27381,9 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { // We have a pointer-to-array and a pointer-to-vector. If the elements and // lengths match, return the result. - const vector_ty = sema.typeOf(prev_ptr).childType(); - if (array_ty.childType().eql(vector_ty.childType(), sema.mod) and - array_ty.arrayLen() == vector_ty.vectorLen()) + const vector_ty = sema.typeOf(prev_ptr).childType(mod); + if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and + array_ty.arrayLen(mod) == vector_ty.vectorLen(mod)) { return prev_ptr; } else { @@ -27476,14 +27515,14 @@ fn beginComptimePtrMutation( switch (parent.pointee) { .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(); + const check_len = parent.ty.arrayLenIncludingSentinel(mod); if (elem_ptr.index >= check_len) { // TODO have the parent include the decl so we can say "declared here" return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ elem_ptr.index, check_len, }); } - const elem_ty = parent.ty.childType(); + const elem_ty = parent.ty.childType(mod); // We might have a pointer to multiple elements of the array (e.g. a pointer // to a sub-array). 
In this case, we just have to reinterpret the relevant @@ -27510,7 +27549,7 @@ fn beginComptimePtrMutation( defer parent.finishArena(sema.mod); const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); const elems = try arena.alloc(Value, array_len_including_sentinel); @memset(elems, Value.undef); @@ -27536,7 +27575,7 @@ fn beginComptimePtrMutation( defer parent.finishArena(sema.mod); const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(); + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); // bytes.len may be one greater than dest_len because of the case when // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. assert(bytes.len >= dest_len); @@ -27567,13 +27606,13 @@ fn beginComptimePtrMutation( defer parent.finishArena(sema.mod); const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(); + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; const elems = try arena.alloc(Value, @intCast(usize, dest_len)); for (bytes, 0..) |byte, i| { elems[i] = try Value.Tag.int_u64.create(arena, byte); } - if (parent.ty.sentinel()) |sent_val| { + if (parent.ty.sentinel(mod)) |sent_val| { assert(elems.len == bytes.len + 1); elems[bytes.len] = sent_val; } @@ -27603,7 +27642,7 @@ fn beginComptimePtrMutation( const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); const elems = try arena.alloc(Value, array_len_including_sentinel); if (elems.len > 0) elems[0] = repeated_val; for (elems[1..]) |*elem| { @@ -27906,12 +27945,12 @@ fn beginComptimePtrMutation( }, .opt_payload_ptr => { const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, try ptr_elem_ty.optionalChildAlloc(sema.arena)); + return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); }; var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); switch (parent.pointee) { .direct => |val_ptr| { - const payload_ty = try parent.ty.optionalChildAlloc(sema.arena); + const payload_ty = parent.ty.optionalChild(mod); switch (val_ptr.tag()) { .undef, .null_value => { // An optional has been initialized to undefined at comptime and now we @@ -27984,7 +28023,7 @@ fn beginComptimePtrMutationInner( // Handle the case that the decl is an array and we're actually trying to point to an element. 
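// [Editor's note, illustration only -- not part of this patch.] The bytes,
// str_lit, and repeated cases above all normalize a compact comptime array
// representation into an explicit Value-per-element buffer before a single
// element is mutated, e.g. (hypothetical user code):
//     comptime {
//         var a: [3]u8 = "abc".*; // stored compactly until first mutation
//         a[1] = 'x';             // forces expansion into 3 element Values
//     }
// and, as the assertions note, a `[N:S]T` buffer carries N+1 values.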
if (decl_ty.isArrayOrVector(mod)) { - const decl_elem_ty = decl_ty.childType(); + const decl_elem_ty = decl_ty.childType(mod); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, @@ -28105,7 +28144,7 @@ fn beginComptimePtrLoad( // If we're loading an elem_ptr that was derived from a different type // than the true type of the underlying decl, we cannot deref directly const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { - const deref_elem_ty = deref.pointee.?.ty.childType(); + const deref_elem_ty = deref.pointee.?.ty.childType(mod); break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; } else false; @@ -28115,12 +28154,12 @@ fn beginComptimePtrLoad( } var array_tv = deref.pointee.?; - const check_len = array_tv.ty.arrayLenIncludingSentinel(); + const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); if (maybe_array_ty) |load_ty| { // It's possible that we're loading a [N]T, in which case we'd like to slice // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector(mod) and load_ty.childType().eql(elem_ty, sema.mod)) { - const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); + if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) { + const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N), @@ -28134,7 +28173,7 @@ fn beginComptimePtrLoad( break :blk deref; } if (elem_ptr.index == check_len - 1) { - if (array_tv.ty.sentinel()) |sent| { + if (array_tv.ty.sentinel(mod)) |sent| { deref.pointee = TypedValue{ .ty = elem_ty, .val = sent, @@ -28226,7 +28265,7 @@ fn beginComptimePtrLoad( const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; const payload_ty = switch (ptr_val.tag()) { .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(), - .opt_payload_ptr => try payload_ptr.container_ty.optionalChildAlloc(sema.arena), + .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod), else => unreachable, }; var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); @@ -28357,12 +28396,13 @@ fn coerceArrayPtrToSlice( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(inst)) |val| { const ptr_array_ty = sema.typeOf(inst); - const array_ty = ptr_array_ty.childType(); + const array_ty = ptr_array_ty.childType(mod); const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = val, - .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()), + .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)), }); return sema.addConstant(dest_ty, slice_val); } @@ -28371,11 +28411,11 @@ fn coerceArrayPtrToSlice( } fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool { - const dest_info = dest_ty.ptrInfo().data; - const inst_info = inst_ty.ptrInfo().data; const mod = sema.mod; - const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and 
(inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or - (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or + const dest_info = dest_ty.ptrInfo(mod); + const inst_info = inst_ty.ptrInfo(mod); + const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or + (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0); const ok_cv_qualifiers = @@ -28647,7 +28687,8 @@ fn coerceAnonStructToUnionPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const union_ty = ptr_union_ty.childType(); + const mod = sema.mod; + const union_ty = ptr_union_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src); return sema.analyzeRef(block, union_ty_src, union_inst); @@ -28661,7 +28702,8 @@ fn coerceAnonStructToStructPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const struct_ty = ptr_struct_ty.childType(); + const mod = sema.mod; + const struct_ty = ptr_struct_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src); return sema.analyzeRef(block, struct_ty_src, struct_inst); @@ -28676,15 +28718,16 @@ fn coerceArrayLike( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const inst_len = inst_ty.arrayLen(); - const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen()); - const target = sema.mod.getTarget(); + const inst_len = inst_ty.arrayLen(mod); + const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod)); + const target = mod.getTarget(); if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ - dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), + dest_ty.fmt(mod), inst_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len}); @@ -28694,8 +28737,8 @@ fn coerceArrayLike( return sema.failWithOwnedErrorMsg(msg); } - const dest_elem_ty = dest_ty.childType(); - const inst_elem_ty = inst_ty.childType(); + const dest_elem_ty = dest_ty.childType(mod); + const inst_elem_ty = inst_ty.childType(mod); const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(inst)) |inst_val| { @@ -28749,9 +28792,10 @@ fn coerceTupleToArray( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const inst_len = inst_ty.arrayLen(); - const dest_len = dest_ty.arrayLen(); + const inst_len = inst_ty.arrayLen(mod); + const dest_len = dest_ty.arrayLen(mod); if (dest_len != inst_len) { const msg = msg: { @@ -28766,16 +28810,16 @@ fn coerceTupleToArray( return sema.failWithOwnedErrorMsg(msg); } - const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel()); 
+ const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod)); const element_vals = try sema.arena.alloc(Value, dest_elems); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems); - const dest_elem_ty = dest_ty.childType(); + const dest_elem_ty = dest_ty.childType(mod); var runtime_src: ?LazySrcLoc = null; for (element_vals, 0..) |*elem, i_usize| { const i = @intCast(u32, i_usize); if (i_usize == inst_len) { - elem.* = dest_ty.sentinel().?; + elem.* = dest_ty.sentinel(mod).?; element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*); break; } @@ -28812,9 +28856,10 @@ fn coerceTupleToSlicePtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { - const tuple_ty = sema.typeOf(ptr_tuple).childType(); + const mod = sema.mod; + const tuple_ty = sema.typeOf(ptr_tuple).childType(mod); const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); - const slice_info = slice_ty.ptrInfo().data; + const slice_info = slice_ty.ptrInfo(mod); const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod); const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src); if (slice_info.@"align" != 0) { @@ -28833,8 +28878,9 @@ fn coerceTupleToArrayPtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); - const ptr_info = ptr_array_ty.ptrInfo().data; + const ptr_info = ptr_array_ty.ptrInfo(mod); const array_ty = ptr_info.pointee_type; const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src); if (ptr_info.@"align" != 0) { @@ -29231,7 +29277,7 @@ fn analyzeLoad( const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { - .Pointer => ptr_ty.childType(), + .Pointer => ptr_ty.childType(mod), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; @@ -29245,7 +29291,7 @@ fn analyzeLoad( } } - if (ptr_ty.ptrInfo().data.vector_index == .runtime) { + if (ptr_ty.ptrInfo(mod).vector_index == .runtime) { const ptr_inst = Air.refToIndex(ptr).?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[ptr_inst] == .ptr_elem_ptr) { @@ -29318,8 +29364,7 @@ fn analyzeIsNull( const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; const operand_ty = sema.typeOf(operand); - var buf: Type.Payload.ElemType = undefined; - if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(&buf).zigTypeTag(mod) == .NoReturn) { + if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) { return inverted_non_null_res; } if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) { @@ -29339,7 +29384,7 @@ fn analyzePtrIsNonErrComptimeOnly( const mod = sema.mod; const ptr_ty = sema.typeOf(operand); assert(ptr_ty.zigTypeTag(mod) == .Pointer); - const child_ty = ptr_ty.childType(); + const child_ty = ptr_ty.childType(mod); const child_tag = child_ty.zigTypeTag(mod); if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true; @@ -29495,7 +29540,7 @@ fn analyzeSlice( // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. 
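// [Editor's note, illustration only -- not part of this patch.] "Double
// pointer" in the comment above: ZIR hands Sema a pointer to the sliced
// lvalue, so for (hypothetical user code)
//     var buf: [4]u8 = undefined;
//     const s = buf[1..3];
// the operand is `*[4]u8`, a pointer to the array; slicing an existing slice
// instead yields a pointer to that slice, which the code below loads through
// first.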
const ptr_ptr_ty = sema.typeOf(ptr_ptr); const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { - .Pointer => ptr_ptr_ty.elemType(), + .Pointer => ptr_ptr_ty.childType(mod), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}), }; @@ -29506,30 +29551,30 @@ fn analyzeSlice( var ptr_sentinel: ?Value = null; switch (ptr_ptr_child_ty.zigTypeTag(mod)) { .Array => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); - elem_ty = ptr_ptr_child_ty.childType(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); + elem_ty = ptr_ptr_child_ty.childType(mod); }, - .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { + .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) { .One => { - const double_child_ty = ptr_ptr_child_ty.childType(); + const double_child_ty = ptr_ptr_child_ty.childType(mod); if (double_child_ty.zigTypeTag(mod) == .Array) { - ptr_sentinel = double_child_ty.sentinel(); + ptr_sentinel = double_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = double_child_ty; - elem_ty = double_child_ty.childType(); + elem_ty = double_child_ty.childType(mod); } else { return sema.fail(block, src, "slice of single-item pointer", .{}); } }, .Many, .C => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; - elem_ty = ptr_ptr_child_ty.childType(); + elem_ty = ptr_ptr_child_ty.childType(mod); - if (ptr_ptr_child_ty.ptrSize() == .C) { + if (ptr_ptr_child_ty.ptrSize(mod) == .C) { if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| { if (ptr_val.isNull(mod)) { return sema.fail(block, src, "slice of null pointer", .{}); @@ -29538,11 +29583,11 @@ fn analyzeSlice( } }, .Slice => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; - elem_ty = ptr_ptr_child_ty.childType(); + elem_ty = ptr_ptr_child_ty.childType(mod); }, }, else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}), @@ -29563,7 +29608,7 @@ fn analyzeSlice( var end_is_len = uncasted_end_opt == .none; const end = e: { if (array_ty.zigTypeTag(mod) == .Array) { - const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()); + const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)); if (!end_is_len) { const end = if (by_length) end: { @@ -29574,10 +29619,10 @@ fn analyzeSlice( if (try sema.resolveMaybeUndefVal(end)) |end_val| { const len_s_val = try Value.Tag.int_u64.create( sema.arena, - array_ty.arrayLenIncludingSentinel(), + array_ty.arrayLenIncludingSentinel(mod), ); if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) { - const sentinel_label: []const u8 = if (array_ty.sentinel() != null) + const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null) " +1 (sentinel)" else ""; @@ -29617,7 +29662,7 @@ fn analyzeSlice( if (slice_val.isUndef()) { return sema.fail(block, src, "slice of undefined", .{}); } - const has_sentinel = slice_ty.sentinel() != null; + const has_sentinel = slice_ty.sentinel(mod) != null; var int_payload: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel), @@ -29751,8 +29796,8 @@ fn 
analyzeSlice( try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); - const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data; - const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; + const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo(mod); + const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { const new_len_int = new_len_val.toUnsignedInt(mod); @@ -29780,7 +29825,7 @@ fn analyzeSlice( if (slice_ty.isSlice(mod)) { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - const actual_len = if (slice_ty.sentinel() == null) + const actual_len = if (slice_ty.sentinel(mod) == null) slice_len_inst else try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -29839,7 +29884,7 @@ fn analyzeSlice( // requirement: end <= len const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) - try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) + try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the @@ -29848,7 +29893,7 @@ fn analyzeSlice( } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - if (slice_ty.sentinel() == null) break :blk slice_len_inst; + if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst; // we have to add one because slice lengths don't include the sentinel break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -30284,7 +30329,10 @@ fn cmpVector( const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src); - const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.bool); + const result_ty = try mod.vectorType(.{ + .len = lhs_ty.vectorLen(mod), + .child = .bool_type, + }); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { @@ -30484,12 +30532,12 @@ fn resolvePeerTypes( } continue; }, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .ComptimeInt => switch (chosen_ty_tag) { .Int, .Float, .ComptimeFloat => continue, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .Float => switch (chosen_ty_tag) { @@ -30654,10 +30702,10 @@ fn resolvePeerTypes( }, }, .Pointer => { - const cand_info = candidate_ty.ptrInfo().data; + const cand_info = candidate_ty.ptrInfo(mod); switch (chosen_ty_tag) { .Pointer => { - const chosen_info = chosen_ty.ptrInfo().data; + const chosen_info = chosen_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30690,8 +30738,8 @@ fn resolvePeerTypes( chosen_info.pointee_type.zigTypeTag(mod) == .Array and cand_info.pointee_type.zigTypeTag(mod) == .Array) { - const chosen_elem_ty = chosen_info.pointee_type.childType(); - const cand_elem_ty = cand_info.pointee_type.childType(); + const chosen_elem_ty = chosen_info.pointee_type.childType(mod); + const cand_elem_ty = cand_info.pointee_type.childType(mod); const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, 
chosen_elem_ty, cand_elem_ty, chosen_info.mutable, target, src, src); if (chosen_ok) { @@ -30757,10 +30805,9 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf); + const chosen_ptr_ty = chosen_ty.optionalChild(mod); if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30777,7 +30824,7 @@ fn resolvePeerTypes( .ErrorUnion => { const chosen_ptr_ty = chosen_ty.errorUnionPayload(); if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30802,8 +30849,7 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf); + const opt_child_ty = candidate_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { seen_const = seen_const or opt_child_ty.isConstPtr(); any_are_null = true; @@ -30818,13 +30864,13 @@ fn resolvePeerTypes( }, .Vector => switch (chosen_ty_tag) { .Vector => { - const chosen_len = chosen_ty.vectorLen(); - const candidate_len = candidate_ty.vectorLen(); + const chosen_len = chosen_ty.vectorLen(mod); + const candidate_len = candidate_ty.vectorLen(mod); if (chosen_len != candidate_len) continue; - const chosen_child_ty = chosen_ty.childType(); - const candidate_child_ty = candidate_ty.childType(); + const chosen_child_ty = chosen_ty.childType(mod); + const candidate_child_ty = candidate_ty.childType(mod); if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { const chosen_info = chosen_child_ty.intInfo(mod); const candidate_info = candidate_child_ty.intInfo(mod); @@ -30853,8 +30899,8 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) { - if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { + if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { continue; } }, @@ -30874,8 +30920,7 @@ fn resolvePeerTypes( continue; }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf); + const opt_child_ty = chosen_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) { continue; } @@ -30949,16 +30994,16 @@ fn resolvePeerTypes( if (convert_to_slice) { // turn *[N]T => []T - const chosen_child_ty = chosen_ty.childType(); - var info = chosen_ty.ptrInfo(); - info.data.sentinel = chosen_child_ty.sentinel(); - info.data.size = .Slice; - info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr()); - info.data.pointee_type = chosen_child_ty.elemType2(mod); - - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + const chosen_child_ty = chosen_ty.childType(mod); + var info = chosen_ty.ptrInfo(mod); + info.sentinel = 
chosen_child_ty.sentinel(mod); + info.size = .Slice; + info.mutable = !(seen_const or chosen_child_ty.isConstPtr()); + info.pointee_type = chosen_child_ty.elemType2(mod); + + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; @@ -30970,22 +31015,22 @@ fn resolvePeerTypes( switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { const ptr_ty = chosen_ty.errorUnionPayload(); - var info = ptr_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + var info = ptr_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, .Pointer => { - var info = chosen_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + var info = chosen_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; @@ -30998,7 +31043,7 @@ fn resolvePeerTypes( if (any_are_null) { const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { .Null, .Optional => chosen_ty, - else => try Type.optional(sema.arena, chosen_ty), + else => try Type.optional(sema.arena, chosen_ty, mod), }; const set_ty = err_set_ty orelse return opt_ty; return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod); @@ -31077,13 +31122,12 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { .Struct => return sema.resolveStructLayout(ty), .Union => return sema.resolveUnionLayout(ty), .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return; - const elem_ty = ty.childType(); + if (ty.arrayLenIncludingSentinel(mod) == 0) return; + const elem_ty = ty.childType(mod); return sema.resolveTypeLayout(elem_ty); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); // In case of querying the ABI alignment of this optional, we will ask // for hasRuntimeBits() of the payload type, so we need "requires comptime" // to be known already before this function returns. 
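// [Editor's note, illustration only -- not part of this patch.] A sketch of
// why the comment above matters: the ABI alignment of a type like `?u32`
// reduces to a hasRuntimeBits() query on the `u32` payload, and answering
// that requires "requires comptime" to already be resolved; otherwise the
// alignment query would re-enter type resolution and could trip the
// "foo depends on itself" dependency-loop error mentioned elsewhere in this
// file.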
@@ -31343,10 +31387,10 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Pointer) { - switch (ty.ptrSize()) { + switch (ty.ptrSize(mod)) { .Slice, .Many, .C => return, .One => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); if (elem_ty.zigTypeTag(mod) == .Array) return; // TODO https://github.com/ziglang/zig/issues/15479 // if (elem_ty.isTuple()) return; @@ -31418,8 +31462,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .int_type => false, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -31478,12 +31522,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }; return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, .empty_struct_literal, .empty_struct, .error_set, @@ -31491,34 +31529,20 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_inferred, .error_set_merged, .@"opaque", - .array_u8, - .array_u8_sentinel_0, .enum_simple, => false, - .single_const_pointer_to_comptime_int, - .function, - => true, + .function => true, .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .array, .array_sentinel, - .vector, - => return sema.resolveTypeRequiresComptime(ty.childType()), + => return sema.resolveTypeRequiresComptime(ty.childType(mod)), - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const child_ty = ty.childType(); + .pointer => { + const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { @@ -31526,12 +31550,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return sema.resolveTypeRequiresComptime(ty.optionalChild(&buf)); + .optional => { + return sema.resolveTypeRequiresComptime(ty.optionalChild(mod)); }, .tuple, .anon_struct => { @@ -31609,7 +31629,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .Pointer => { - const child_ty = try sema.resolveTypeFields(ty.childType()); + const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, .Struct => switch (ty.tag()) { @@ -31624,10 +31644,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { else => {}, }, .Union => return sema.resolveUnionFully(ty), - .Array => return sema.resolveTypeFully(ty.childType()), + .Array => return sema.resolveTypeFully(ty.childType(mod)), .Optional => { - var buf: Type.Payload.ElemType = undefined; - return sema.resolveTypeFully(ty.optionalChild(&buf)); + return sema.resolveTypeFully(ty.optionalChild(mod)); }, .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), .Fn => { @@ -32897,10 
+32916,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .ptr_type => @panic("TODO"), + .ptr_type => return null, .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + if (vector_type.len == 0) return Value.initTag(.empty_array); + if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v; + return null; + }, + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -32963,34 +32986,15 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set_merged, .error_union, .function, - .single_const_pointer_to_comptime_int, .array_sentinel, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .anyerror_void_error_union, .error_set_inferred, .@"opaque", - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .anyframe_T, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer, - .single_mut_pointer, .pointer, => return null, .optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { return Value.null; } else { @@ -33111,10 +33115,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .vector, .array, .array_u8 => { - if (ty.arrayLen() == 0) + .array => { + if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(ty.elemType())) != null) { + if ((try sema.typeHasOnePossibleValue(ty.childType(mod))) != null) { return Value.initTag(.the_only_possible_value); } return null; @@ -33147,20 +33151,13 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .data = .{ .interned = ty.ip_index }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + } else { + try sema.air_instructions.append(sema.gpa, .{ + .tag = .const_ty, + .data = .{ .ty = ty }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } - switch (ty.tag()) { - .manyptr_u8 => return .manyptr_u8_type, - .manyptr_const_u8 => return .manyptr_const_u8_type, - .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, - .const_slice_u8 => return .const_slice_u8_type, - .anyerror_void_error_union => return .anyerror_void_error_union_type, - else => {}, - } - try sema.air_instructions.append(sema.gpa, .{ - .tag = .const_ty, - .data = .{ .ty = ty }, - }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -33173,6 +33170,15 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { const gpa = sema.gpa; + if (val.ip_index != .none) { + if (@enumToInt(val.ip_index) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index)); + try sema.air_instructions.append(gpa, .{ + .tag = .interned, + .data = .{ .interned = val.ip_index }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + } const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, val); 
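// [Editor's note, illustration only -- not part of this patch.] The fast path
// added above mirrors addType: an interned value whose index is below
// Air.ref_start_index is one of the well-known constants that Air.Inst.Ref
// names directly, so it becomes a Ref with no instruction at all:
//     // @enumToInt(val.ip_index) < Air.ref_start_index
//     //     => @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index))
// Every other interned value costs a single `.interned` instruction, whereas
// the legacy path here still burns a type instruction plus an air_values slot.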
     try sema.air_instructions.append(gpa, .{
@@ -33331,7 +33337,8 @@ pub fn analyzeAddressSpace(
 /// Asserts the value is a pointer and dereferences it.
 /// Returns `null` if the pointer contents cannot be loaded at comptime.
 fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
-    const load_ty = ptr_ty.childType();
+    const mod = sema.mod;
+    const load_ty = ptr_ty.childType(mod);
     const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true);
     switch (res) {
         .runtime_load => return null,
@@ -33422,11 +33429,7 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError
 /// This can return `error.AnalysisFail` because it sometimes requires resolving whether
 /// a type has zero bits, which can cause a "foo depends on itself" compile error.
 /// This logic must be kept in sync with `Type.isPtrLikeOptional`.
-fn typePtrOrOptionalPtrTy(
-    sema: *Sema,
-    ty: Type,
-    buf: *Type.Payload.ElemType,
-) !?Type {
+fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
     const mod = sema.mod;
     if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
@@ -33435,14 +33438,14 @@ fn typePtrOrOptionalPtrTy(
             .C => return ptr_type.elem_type.toType(),
             .One, .Many => return ty,
         },
-        .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) {
+        .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) {
             .ptr_type => |ptr_type| switch (ptr_type.size) {
                 .Slice, .C => return null,
                 .Many, .One => {
                     if (ptr_type.is_allowzero) return null;
 
                     // optionals of zero sized types behave like bools, not pointers
-                    const payload_ty = o.payload_type.toType();
+                    const payload_ty = opt_child.toType();
                     if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) {
                         return null;
                     }
@@ -33456,25 +33459,9 @@ fn typePtrOrOptionalPtrTy(
     };
 
     switch (ty.tag()) {
-        .optional_single_const_pointer,
-        .optional_single_mut_pointer,
-        .c_const_pointer,
-        .c_mut_pointer,
-        => return ty.optionalChild(buf),
-
-        .single_const_pointer_to_comptime_int,
-        .single_const_pointer,
-        .single_mut_pointer,
-        .many_const_pointer,
-        .many_mut_pointer,
-        .manyptr_u8,
-        .manyptr_const_u8,
-        .manyptr_const_u8_sentinel_0,
-        => return ty,
-
-        .pointer => switch (ty.ptrSize()) {
+        .pointer => switch (ty.ptrSize(mod)) {
             .Slice => return null,
-            .C => return ty.optionalChild(buf),
+            .C => return ty.optionalChild(mod),
             else => return ty,
         },
@@ -33482,10 +33469,10 @@ fn typePtrOrOptionalPtrTy(
         .inferred_alloc_mut => unreachable,
 
         .optional => {
-            const child_type = ty.optionalChild(buf);
+            const child_type = ty.optionalChild(mod);
             if (child_type.zigTypeTag(mod) != .Pointer) return null;
 
-            const info = child_type.ptrInfo().data;
+            const info = child_type.ptrInfo(mod);
             switch (info.size) {
                 .Slice, .C => return null,
                 .Many, .One => {
@@ -33518,8 +33505,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .int_type => return false,
         .ptr_type => @panic("TODO"),
         .array_type => @panic("TODO"),
-        .vector_type => @panic("TODO"),
-        .optional_type => @panic("TODO"),
+        .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()),
+        .opt_type => @panic("TODO"),
         .error_union_type => @panic("TODO"),
         .simple_type => |t| return switch (t) {
             .f16,
@@ -33578,12 +33565,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         }
     }
     return switch (ty.tag()) {
-        .manyptr_u8,
-        .manyptr_const_u8,
-        .manyptr_const_u8_sentinel_0,
-        .const_slice_u8,
-        .const_slice_u8_sentinel_0,
-        .anyerror_void_error_union,
         .empty_struct_literal,
         .empty_struct,
         .error_set,
@@ -33591,34 +33572,20 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .error_set_inferred,
         .error_set_merged,
         .@"opaque",
-        .array_u8,
-        .array_u8_sentinel_0,
         .enum_simple,
         => false,
 
-        .single_const_pointer_to_comptime_int,
-        .function,
-        => true,
+        .function => true,
 
         .inferred_alloc_mut => unreachable,
         .inferred_alloc_const => unreachable,
 
         .array,
         .array_sentinel,
-        .vector,
-        => return sema.typeRequiresComptime(ty.childType()),
+        => return sema.typeRequiresComptime(ty.childType(mod)),
 
-        .pointer,
-        .single_const_pointer,
-        .single_mut_pointer,
-        .many_const_pointer,
-        .many_mut_pointer,
-        .c_const_pointer,
-        .c_mut_pointer,
-        .const_slice,
-        .mut_slice,
-        => {
-            const child_ty = ty.childType();
+        .pointer => {
+            const child_ty = ty.childType(mod);
             if (child_ty.zigTypeTag(mod) == .Fn) {
                 return child_ty.fnInfo().is_generic;
             } else {
@@ -33626,12 +33593,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
             }
         },
 
-        .optional,
-        .optional_single_mut_pointer,
-        .optional_single_const_pointer,
-        => {
-            var buf: Type.Payload.ElemType = undefined;
-            return sema.typeRequiresComptime(ty.optionalChild(&buf));
+        .optional => {
+            return sema.typeRequiresComptime(ty.optionalChild(mod));
         },
 
         .tuple, .anon_struct => {
@@ -33814,7 +33777,7 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
 fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33874,7 +33837,7 @@ fn intSub(
 ) !Value {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33934,7 +33897,7 @@ fn floatAdd(
 ) !Value {
     const mod = sema.mod;
     if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
+        const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -33992,7 +33955,7 @@ fn floatSub(
 ) !Value {
     const mod = sema.mod;
     if (float_type.zigTypeTag(mod) == .Vector) {
-        const result_data = try sema.arena.alloc(Value, float_type.vectorLen());
+        const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34050,8 +34013,8 @@ fn intSubWithOverflow(
 ) !Value.OverflowArithmeticResult {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34105,8 +34068,8 @@ fn floatToInt(
 ) CompileError!Value {
     const mod = sema.mod;
     if (float_ty.zigTypeTag(mod) == .Vector) {
-        const elem_ty = float_ty.childType();
-        const result_data = try sema.arena.alloc(Value, float_ty.vectorLen());
+        const elem_ty = float_ty.childType(mod);
+        const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var buf: Value.ElemValueBuffer = undefined;
             const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
@@ -34383,8 +34346,8 @@ fn intAddWithOverflow(
 ) !Value.OverflowArithmeticResult {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen());
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34442,7 +34405,7 @@ fn compareAll(
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
         var i: usize = 0;
-        while (i < ty.vectorLen()) : (i += 1) {
+        while (i < ty.vectorLen(mod)) : (i += 1) {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
             const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
@@ -34490,7 +34453,7 @@ fn compareVector(
 ) !Value {
     const mod = sema.mod;
     assert(ty.zigTypeTag(mod) == .Vector);
-    const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+    const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
     for (result_data, 0..) |*scalar, i| {
         var lhs_buf: Value.ElemValueBuffer = undefined;
         var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -34511,10 +34474,10 @@ fn compareVector(
 
 /// This code is duplicated in `analyzePtrArithmetic`.
 fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
     const mod = sema.mod;
-    const ptr_info = ptr_ty.ptrInfo().data;
+    const ptr_info = ptr_ty.ptrInfo(mod);
     const elem_ty = ptr_ty.elemType2(mod);
     const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0;
-    const parent_ty = ptr_ty.childType();
+    const parent_ty = ptr_ty.childType(mod);
 
     const VI = Type.Payload.Pointer.Data.VectorIndex;
@@ -34522,14 +34485,14 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type {
         host_size: u16 = 0,
         alignment: u32 = 0,
         vector_index: VI = .none,
-    } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: {
+    } = if (parent_ty.isVector(mod) and ptr_info.size == .One) blk: {
         const elem_bits = elem_ty.bitSize(mod);
         if (elem_bits == 0) break :blk .{};
         const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits);
         if (!is_packed) break :blk .{};
 
         break :blk .{
-            .host_size = @intCast(u16, parent_ty.arrayLen()),
+            .host_size = @intCast(u16, parent_ty.arrayLen(mod)),
             .alignment = @intCast(u16, parent_ty.abiAlignment(mod)),
             .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime,
         };
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 877a8f5f4cb8..7f599caafbec 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -77,15 +77,6 @@ pub fn print(
             return writer.writeAll("(variable)");
 
         while (true) switch (val.tag()) {
-            .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"),
-            .const_slice_u8_type => return writer.writeAll("[]const u8"),
-            .const_slice_u8_sentinel_0_type => return writer.writeAll("[:0]const u8"),
-            .anyerror_void_error_union_type => return writer.writeAll("anyerror!void"),
-
-            .manyptr_u8_type => return writer.writeAll("[*]u8"),
-            .manyptr_const_u8_type => return writer.writeAll("[*]const u8"),
-            .manyptr_const_u8_sentinel_0_type => return writer.writeAll("[*:0]const u8"),
-
             .empty_struct_value, .aggregate => {
                 if (level == 0) {
                     return writer.writeAll(".{ ... }");
@@ -112,7 +103,7 @@ pub fn print(
                 return writer.writeAll("}");
             } else {
                 const elem_ty = ty.elemType2(mod);
-                const len = ty.arrayLen();
+                const len = ty.arrayLen(mod);
                 if (elem_ty.eql(Type.u8, mod)) str: {
                     const max_len = @intCast(usize, std.math.min(len, max_string_len));
@@ -288,7 +279,7 @@ pub fn print(
                 .ty = ty.elemType2(mod),
                 .val = val.castTag(.repeated).?.data,
             };
-            const len = ty.arrayLen();
+            const len = ty.arrayLen(mod);
             const max_len = std.math.min(len, max_aggregate_items);
             while (i < max_len) : (i += 1) {
                 if (i != 0) try writer.writeAll(", ");
@@ -306,7 +297,7 @@ pub fn print(
             try writer.writeAll(".{ ");
             try print(.{
                 .ty = ty.elemType2(mod),
-                .val = ty.sentinel().?,
+                .val = ty.sentinel(mod).?,
             }, writer, level - 1, mod);
             return writer.writeAll(" }");
         },
@@ -364,8 +355,7 @@ pub fn print(
         },
         .opt_payload => {
             val = val.castTag(.opt_payload).?.data;
-            var buf: Type.Payload.ElemType = undefined;
-            ty = ty.optionalChild(&buf);
+            ty = ty.optionalChild(mod);
             return print(.{ .ty = ty, .val = val }, writer, level, mod);
         },
         .eu_payload_ptr => {
@@ -386,13 +376,8 @@ pub fn print(
 
             try writer.writeAll(", &(payload of ");
 
-            var ptr_ty: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = data.container_ty,
-            };
-
             try print(.{
-                .ty = Type.initPayload(&ptr_ty.base),
+                .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"),
                 .val = data.container_ptr,
             }, writer, level - 1, mod);
@@ -415,13 +400,8 @@ pub fn print(
 
             try writer.writeAll(", &(payload of ");
 
-            var ptr_ty: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = data.container_ty,
-            };
-
             try print(.{
-                .ty = Type.initPayload(&ptr_ty.base),
+                .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"),
                 .val = data.container_ptr,
             }, writer, level - 1, mod);
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 503bbdbb02b1..81169750c1a1 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1030,7 +1030,7 @@ fn allocMem(
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).childType(mod);
 
     if (!elem_ty.hasRuntimeBits(mod)) {
         // return the stack offset 0. Stack offset 0 will be where all
@@ -1140,17 +1140,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = switch (self.ret_mcv) {
         .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
             const ret_ty = self.fn_type.fnReturnType();
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
 
             // addr_reg will contain the address of where to store the
             // result into
@@ -2406,9 +2403,9 @@ fn ptrArithmetic(
             assert(rhs_ty.eql(Type.usize, mod));
 
             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
             const elem_size = elem_ty.abiSize(mod);
@@ -3024,8 +3021,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
     const mod = self.bin_file.options.module.?;
-    var opt_buf: Type.Payload.ElemType = undefined;
-    const payload_ty = optional_ty.optionalChild(&opt_buf);
+    const payload_ty = optional_ty.optionalChild(mod);
     if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
     if (optional_ty.isPtrLikeOptional(mod)) {
         // TODO should we reuse the operand here?
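
[Review note] The hunks above all apply one mechanical migration: `Type`
accessors that used to write their result into a caller-owned
`Type.Payload.ElemType` buffer now take the `Module`, whose `InternPool` owns
the child type. A side-by-side sketch of the pattern (illustrative only;
`opt_ty` stands for any optional type):

    // Old convention: the caller threads scratch storage through the call.
    var buf: Type.Payload.ElemType = undefined;
    const payload_ty_old = opt_ty.optionalChild(&buf);

    // New convention: the Module resolves the child type, no buffer needed.
    const payload_ty_new = opt_ty.optionalChild(mod);
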
@@ -3459,7 +3455,7 @@ fn ptrElemVal( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = self.bin_file.options.module.?; - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); // TODO optimize for elem_sizes of 1, 2, 4, 8 @@ -3617,7 +3613,7 @@ fn reuseOperand( fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { const mod = self.bin_file.options.module.?; - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); switch (ptr) { @@ -3773,7 +3769,7 @@ fn genInlineMemset( ) !void { const dst_reg = switch (dst) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst), + else => try self.copyToTmpRegister(Type.manyptr_u8, dst), }; const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -4096,7 +4092,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); - const struct_ty = ptr_ty.childType(); + const struct_ty = ptr_ty.childType(mod); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { @@ -4173,7 +4169,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); - const struct_ty = self.air.getRefType(ty_pl.ty).childType(); + const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); switch (field_ptr) { .ptr_stack_offset => |off| { @@ -4254,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -4280,11 +4276,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ret_ptr_reg = self.registerAlias(.x0, Type.usize); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.register_manager.getReg(ret_ptr_reg, null); try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset }); @@ -4453,11 +4445,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { // // self.ret_mcv is an address to where this function // should store its result into - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(self.ret_mcv, operand, ptr_ty, ret_ty); }, else => unreachable, @@ -4533,8 +4521,7 @@ fn cmp( const mod = self.bin_file.options.module.?; const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { - var opt_buffer: Type.Payload.ElemType = undefined; - const payload_ty = lhs_ty.optionalChild(&opt_buffer); + const payload_ty = lhs_ty.optionalChild(mod); if 
(!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { @@ -4850,8 +4837,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue { const mod = self.bin_file.options.module.?; const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = operand_ty.optionalChild(&buf); + const payload_ty = operand_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :blk .{ .ty = operand_ty, .bind = operand_bind }; @@ -4947,11 +4933,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4973,11 +4960,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4999,11 +4987,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -5025,11 +5014,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -5511,11 +5501,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); // TODO call extern memcpy const 
regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5833,11 +5819,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5957,12 +5939,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = @intCast(u32, array_ty.arrayLen()); + const array_ty = ptr_ty.childType(mod); + const array_len = @intCast(u32, array_ty.arrayLen(mod)); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -6079,8 +6062,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 55ec0d412593..c08cb58c48ea 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1010,7 +1010,7 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. 
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).childType(mod); if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, @@ -1117,17 +1117,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { } fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const result: MCValue = switch (self.ret_mcv) { .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) }, .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into const ret_ty = self.fn_type.fnReturnType(); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the // result into @@ -2372,8 +2369,8 @@ fn ptrElemVal( ptr_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - const elem_ty = ptr_ty.childType(); const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (elem_size) { @@ -2474,7 +2471,8 @@ fn arrayElemVal( array_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - const elem_ty = array_ty.childType(); + const mod = self.bin_file.options.module.?; + const elem_ty = array_ty.childType(mod); const mcv = try array_bind.resolveToMcv(self); switch (mcv) { @@ -2508,11 +2506,7 @@ fn arrayElemVal( const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv }; - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = elem_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(elem_ty); return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst); }, @@ -2659,8 +2653,8 @@ fn reuseOperand( } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const elem_ty = ptr_ty.elemType(); const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (ptr) { @@ -2888,7 +2882,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); - const struct_ty = ptr_ty.childType(); + const struct_ty = ptr_ty.childType(mod); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { @@ -3004,7 +2998,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); - const struct_ty = self.air.getRefType(ty_pl.ty).childType(); + const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod); if (struct_ty.zigTypeTag(mod) == .Union) { return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); @@ -3898,9 +3892,9 @@ fn ptrArithmetic( assert(rhs_ty.eql(Type.usize, mod)); const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so 
get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; const elem_size = @intCast(u32, elem_ty.abiSize(mod)); @@ -4079,7 +4073,7 @@ fn genInlineMemset( ) !void { const dst_reg = switch (dst) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst), + else => try self.copyToTmpRegister(Type.manyptr_u8, dst), }; const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -4229,7 +4223,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -4259,11 +4253,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.register_manager.getReg(.r0, null); try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset }); @@ -4401,11 +4391,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { // // self.ret_mcv is an address to where this function // should store its result into - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(self.ret_mcv, operand, ptr_ty, ret_ty); }, else => unreachable, // invalid return result @@ -4482,8 +4468,7 @@ fn cmp( const mod = self.bin_file.options.module.?; const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { - var opt_buffer: Type.Payload.ElemType = undefined; - const payload_ty = lhs_ty.optionalChild(&opt_buffer); + const payload_ty = lhs_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { @@ -4837,11 +4822,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4863,11 +4849,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try 
self.load(operand, operand_ptr, ptr_ty); @@ -4924,11 +4911,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4950,11 +4938,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -5455,11 +5444,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5816,11 +5801,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5908,12 +5889,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = @intCast(u32, array_ty.arrayLen()); + const array_ty = ptr_ty.childType(mod); + const array_len = @intCast(u32, array_ty.arrayLen(mod)); const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); @@ -6026,8 +6008,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, 
self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 488b937141d0..1e5858a948f9 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -807,7 +807,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).childType(mod); const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; @@ -1099,9 +1099,9 @@ fn binOp( switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; const elem_size = elem_ty.abiSize(mod); @@ -1502,7 +1502,8 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const elem_ty = ptr_ty.elemType(); + const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -2496,8 +2497,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index c565b6dc237b..f8a62f9798b6 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -838,8 +838,9 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { @@ -871,12 +872,13 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = @intCast(u32, array_ty.arrayLen()); + const array_ty = ptr_ty.childType(mod); + const array_len = @intCast(u32, array_ty.arrayLen(mod)); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -1300,7 +1302,7 @@ fn 
airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const mod = self.bin_file.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -1440,8 +1442,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { .Pointer => Type.usize, .ErrorSet => Type.u16, .Optional => blk: { - var opt_buffer: Type.Payload.ElemType = undefined; - const payload_ty = lhs_ty.optionalChild(&opt_buffer); + const payload_ty = lhs_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { @@ -2447,6 +2448,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -2456,8 +2458,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const index_mcv = try self.resolveInst(bin_op.rhs); const slice_ty = self.typeOf(bin_op.lhs); - const elem_ty = slice_ty.childType(); - const mod = self.bin_file.options.module.?; + const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -2797,7 +2798,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).childType(mod); if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, @@ -3001,9 +3002,9 @@ fn binOp( switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; const elem_size = elem_ty.abiSize(mod); @@ -3019,7 +3020,7 @@ fn binOp( // multiplying it with elem_size const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null); - const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null); + const addr = try self.binOp(tag, lhs, offset, Type.manyptr_u8, Type.usize, null); return addr; } }, @@ -4042,11 +4043,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp); const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs); @@ -4269,7 +4266,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void { fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { const mod = self.bin_file.options.module.?; - const elem_ty = 
ptr_ty.elemType(); + const elem_ty = ptr_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); switch (ptr) { @@ -4729,7 +4726,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.typeOf(operand); - const struct_ty = ptr_ty.childType(); + const struct_ty = ptr_ty.childType(mod); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 7fc5dbc8255d..96304628e9b1 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1542,7 +1542,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { const mod = func.bin_file.base.options.module.?; const ptr_ty = func.typeOfIndex(inst); - const pointee_ty = ptr_ty.childType(); + const pointee_ty = ptr_ty.childType(mod); if (func.initial_stack_value == .none) { try func.initializeStack(); @@ -1766,8 +1766,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { }, .Optional => { if (ty.isPtrLikeOptional(mod)) return false; - var buf: Type.Payload.ElemType = undefined; - const pl_type = ty.optionalChild(&buf); + const pl_type = ty.optionalChild(mod); if (pl_type.zigTypeTag(mod) == .ErrorSet) return false; return pl_type.hasRuntimeBitsIgnoreComptime(mod); }, @@ -2139,7 +2138,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; - const child_type = func.typeOfIndex(inst).childType(); + const child_type = func.typeOfIndex(inst).childType(mod); var result = result: { if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { @@ -2161,7 +2160,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ret_ty = func.typeOf(un_op).childType(); + const ret_ty = func.typeOf(un_op).childType(mod); const fn_info = func.decl.ty.fnInfo(); if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -2188,7 +2187,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const mod = func.bin_file.base.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; const ret_ty = fn_ty.fnReturnType(); @@ -2301,8 +2300,8 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); const ptr_ty = func.typeOf(bin_op.lhs); - const ptr_info = ptr_ty.ptrInfo().data; - const ty = ptr_ty.childType(); + const ptr_info = ptr_ty.ptrInfo(mod); + const ty = ptr_ty.childType(mod); if (ptr_info.host_size == 0) { try func.store(lhs, rhs, ty, 0); @@ -2360,8 +2359,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE if (ty.isPtrLikeOptional(mod)) { return func.store(lhs, rhs, Type.usize, 0); } - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); + const pl_ty = ty.optionalChild(mod); if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.u8, 0); } @@ -2454,7 +2452,7 @@ fn airLoad(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const ty = func.air.getRefType(ty_op.ty); const ptr_ty = func.typeOf(ty_op.operand); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand}); @@ -2971,7 +2969,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue break :blk field_offset; }, }, - .Pointer => switch (parent_ty.ptrSize()) { + .Pointer => switch (parent_ty.ptrSize(mod)) { .Slice => switch (field_ptr.field_index) { 0 => 0, 1 => func.ptrSize(), @@ -3001,11 +2999,7 @@ fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.In const mod = func.bin_file.base.options.module.?; const decl = mod.declPtr(decl_index); mod.markDeclAlive(decl); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = decl.ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(decl.ty); return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset); } @@ -3145,8 +3139,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, .Optional => if (ty.optionalReprIsPayload(mod)) { - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); + const pl_ty = ty.optionalChild(mod); if (val.castTag(.opt_payload)) |payload| { return func.lowerConstant(payload.data, pl_ty); } else if (val.isNull(mod)) { @@ -3217,8 +3210,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { else => unreachable, }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); + const pl_ty = ty.optionalChild(mod); if (ty.optionalReprIsPayload(mod)) { return func.emitUndefined(pl_ty); } @@ -3403,8 +3395,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO assert(!(lhs != .stack and rhs == .stack)); const mod = func.bin_file.base.options.module.?; if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // When we hit this case, we must check the value of optionals // that are not pointers. 
This means first checking against non-null for @@ -3609,19 +3600,21 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn } fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.StructField, ty_pl.payload); const struct_ptr = try func.resolveInst(extra.data.struct_operand); - const struct_ty = func.typeOf(extra.data.struct_operand).childType(); + const struct_ty = func.typeOf(extra.data.struct_operand).childType(mod); const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index); func.finishAir(inst, result, &.{extra.data.struct_operand}); } fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try func.resolveInst(ty_op.operand); - const struct_ty = func.typeOf(ty_op.operand).childType(); + const struct_ty = func.typeOf(ty_op.operand).childType(mod); const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index); func.finishAir(inst, result, &.{ty_op.operand}); @@ -3640,7 +3633,7 @@ fn structFieldPtr( const offset = switch (struct_ty.containerLayout()) { .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { - if (result_ty.ptrInfo().data.host_size != 0) { + if (result_ty.ptrInfo(mod).host_size != 0) { break :offset @as(u32, 0); } break :offset struct_ty.packedStructFieldByteOffset(index, mod); @@ -3981,7 +3974,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; + const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; const payload_ty = err_ty.errorUnionPayload(); const result = result: { @@ -4009,7 +4002,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; + const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; const payload_ty = err_ty.errorUnionPayload(); const result = result: { @@ -4156,11 +4149,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const op_ty = func.typeOf(un_op); - const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty; + const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty; const is_null = try func.isNull(operand, optional_ty, opcode); const result = try is_null.toLocal(func, optional_ty); func.finishAir(inst, result, &.{un_op}); @@ -4171,8 +4165,7 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { const mod = func.bin_file.base.options.module.?; try func.emitWValue(operand); - var buf: Type.Payload.ElemType = undefined; - const 
payload_ty = optional_ty.optionalChild(&buf); + const payload_ty = optional_ty.optionalChild(mod); if (!optional_ty.optionalReprIsPayload(mod)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value @@ -4221,14 +4214,13 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.typeOf(ty_op.operand).childType(); + const opt_ty = func.typeOf(ty_op.operand).childType(mod); - const mod = func.bin_file.base.options.module.?; const result = result: { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); + const payload_ty = opt_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4242,9 +4234,8 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); + const opt_ty = func.typeOf(ty_op.operand).childType(mod); + const payload_ty = opt_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } @@ -4325,13 +4316,13 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const slice_ty = func.typeOf(bin_op.lhs); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = slice_ty.childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); // load pointer onto stack @@ -4355,11 +4346,11 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod); const elem_size = elem_ty.abiSize(mod); const slice = try func.resolveInst(bin_op.lhs); @@ -4436,7 +4427,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const array_ty = func.typeOf(ty_op.operand).childType(); + const array_ty = func.typeOf(ty_op.operand).childType(mod); const slice_ty = func.air.getRefType(ty_op.ty); // create a slice on the stack @@ -4448,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } // 
store the length of the array in the slice - const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) }; + const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) }; try func.store(slice_local, len, Type.usize, func.ptrSize()); func.finishAir(inst, slice_local, &.{ty_op.operand}); @@ -4470,13 +4461,13 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ptr_ty = func.typeOf(bin_op.lhs); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = ptr_ty.childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = ptr_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack @@ -4507,12 +4498,12 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = func.typeOf(bin_op.lhs); - const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const mod = func.bin_file.base.options.module.?; + const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod); const elem_size = elem_ty.abiSize(mod); const ptr = try func.resolveInst(bin_op.lhs); @@ -4544,9 +4535,9 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const offset = try func.resolveInst(bin_op.rhs); const ptr_ty = func.typeOf(bin_op.lhs); - const pointee_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const pointee_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; const valtype = typeToValtype(Type.usize, mod); @@ -4565,6 +4556,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -4575,16 +4567,16 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const ptr = try func.resolveInst(bin_op.lhs); const ptr_ty = func.typeOf(bin_op.lhs); const value = try func.resolveInst(bin_op.rhs); - const len = switch (ptr_ty.ptrSize()) { + const len = switch (ptr_ty.ptrSize(mod)) { .Slice => try func.sliceLen(ptr), - .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }), + .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }), .C, .Many => unreachable, }; - const elem_ty = if (ptr_ty.ptrSize() == .One) - ptr_ty.childType().childType() + const elem_ty = if (ptr_ty.ptrSize(mod) == .One) + ptr_ty.childType(mod).childType(mod) else - ptr_ty.childType(); + ptr_ty.childType(mod); const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty); try func.memset(elem_ty, dst_ptr, len, value); @@ -4686,13 +4678,13 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue } fn airArrayElemVal(func: *CodeGen, inst: 
Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const array_ty = func.typeOf(bin_op.lhs);
     const array = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
-    const elem_ty = array_ty.childType();
-    const mod = func.bin_file.base.options.module.?;
+    const elem_ty = array_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
     if (isByRef(array_ty, mod)) {
@@ -4810,7 +4802,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
     const ty = func.typeOfIndex(inst);
-    const elem_ty = ty.childType();
+    const elem_ty = ty.childType(mod);
 
     if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
         switch (operand) {
@@ -4859,7 +4851,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
     }
     const elem_size = elem_ty.bitSize(mod);
-    const vector_len = @intCast(usize, ty.vectorLen());
+    const vector_len = @intCast(usize, ty.vectorLen(mod));
     if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
         return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
     }
@@ -4895,7 +4887,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mask = func.air.values[extra.mask];
     const mask_len = extra.mask_len;
 
-    const child_ty = inst_ty.childType();
+    const child_ty = inst_ty.childType(mod);
     const elem_size = child_ty.abiSize(mod);
 
     // TODO: One of them could be by ref; handle in loop
@@ -4959,16 +4951,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const result_ty = func.typeOfIndex(inst);
-    const len = @intCast(usize, result_ty.arrayLen());
+    const len = @intCast(usize, result_ty.arrayLen(mod));
     const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
 
     const result: WValue = result_value: {
         switch (result_ty.zigTypeTag(mod)) {
             .Array => {
                 const result = try func.allocStack(result_ty);
-                const elem_ty = result_ty.childType();
+                const elem_ty = result_ty.childType(mod);
                 const elem_size = @intCast(u32, elem_ty.abiSize(mod));
-                const sentinel = if (result_ty.sentinel()) |sent| blk: {
+                const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
                     break :blk try func.lowerConstant(sent, elem_ty);
                 } else null;
 
@@ -5190,8 +5182,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
     const mod = func.bin_file.base.options.module.?;
     assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
     assert(op == .eq or op == .neq);
-    var buf: Type.Payload.ElemType = undefined;
-    const payload_ty = operand_ty.optionalChild(&buf);
+    const payload_ty = operand_ty.optionalChild(mod);
 
     // We store the final result in here that will be validated
     // if the optional is truly equal.
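The hunks above and below all apply one mechanical transformation: Type accessors that previously took a caller-provided scratch payload, or no argument at all, now take the Module. A minimal before/after sketch of the two shapes, using only calls that appear verbatim in the surrounding hunks (assume an optional type `opt_ty` and the wasm `func: *CodeGen` are in scope; the two `payload_ty` declarations are alternative versions of the same line, not one scope):

    // Before: the child type was materialized into a stack buffer that
    // the caller had to keep alive for as long as the Type was used.
    var buf: Type.Payload.ElemType = undefined;
    const payload_ty = opt_ty.optionalChild(&buf);

    // After: the accessor takes the Module instead; no scratch buffer.
    const mod = func.bin_file.base.options.module.?;
    const payload_ty = opt_ty.optionalChild(mod);

The same shape covers childType(mod), ptrSize(mod), arrayLen(mod), vectorLen(mod), sentinel(mod), and ptrInfo(mod) throughout this file and the x86_64 backend below, which is why most hunks begin by hoisting `const mod = ...` to the top of the function.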
@@ -5268,7 +5259,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
 fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
-    const un_ty = func.typeOf(bin_op.lhs).childType();
+    const un_ty = func.typeOf(bin_op.lhs).childType(mod);
     const tag_ty = func.typeOf(bin_op.rhs);
     const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
@@ -5398,7 +5389,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
     const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
-    const err_set_ty = func.typeOf(ty_op.operand).childType();
+    const err_set_ty = func.typeOf(ty_op.operand).childType(mod);
     const payload_ty = err_set_ty.errorUnionPayload();
     const operand = try func.resolveInst(ty_op.operand);
 
@@ -5426,7 +5417,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const field_ptr = try func.resolveInst(extra.field_ptr);
 
-    const parent_ty = func.air.getRefType(ty_pl.ty).childType();
+    const parent_ty = func.air.getRefType(ty_pl.ty).childType(mod);
     const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
 
     const result = if (field_offset != 0) result: {
@@ -5455,10 +5446,10 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const dst = try func.resolveInst(bin_op.lhs);
     const dst_ty = func.typeOf(bin_op.lhs);
-    const ptr_elem_ty = dst_ty.childType();
+    const ptr_elem_ty = dst_ty.childType(mod);
     const src = try func.resolveInst(bin_op.rhs);
     const src_ty = func.typeOf(bin_op.rhs);
-    const len = switch (dst_ty.ptrSize()) {
+    const len = switch (dst_ty.ptrSize(mod)) {
         .Slice => blk: {
             const slice_len = try func.sliceLen(dst);
             if (ptr_elem_ty.abiSize(mod) != 1) {
@@ -5470,7 +5461,7 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             break :blk slice_len;
         },
         .One => @as(WValue, .{
-            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)),
+            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)),
        }),
        .C, .Many => unreachable,
    };
@@ -5551,7 +5542,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // As the names are global and the slice elements are constant, we do not have
     // to make a copy of the ptr+value but can point towards them directly.
     const error_table_symbol = try func.bin_file.getErrorTableSymbol();
-    const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
+    const name_ty = Type.const_slice_u8_sentinel_0;
     const mod = func.bin_file.base.options.module.?;
     const abi_size = name_ty.abiSize(mod);
@@ -5857,7 +5848,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.addLabel(.local_set, overflow_bit.local.value);
         break :blk try func.wrapOperand(bin_op, lhs_ty);
     } else if (int_info.bits == 64 and int_info.signedness == .unsigned) blk: {
-        const new_ty = Type.initTag(.u128);
+        const new_ty = Type.u128;
         var lhs_upcast = try (try func.intcast(lhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
         defer lhs_upcast.free(func);
         var rhs_upcast = try (try func.intcast(rhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
@@ -5878,7 +5869,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         const bin_op = try func.callIntrinsic(
             "__multi3",
             &[_]Type{Type.i64} ** 4,
-            Type.initTag(.i128),
+            Type.i128,
             &.{ lhs, lhs_shifted, rhs, rhs_shifted },
         );
         const res = try func.allocLocal(lhs_ty);
@@ -5902,19 +5893,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         const mul1 = try func.callIntrinsic(
             "__multi3",
             &[_]Type{Type.i64} ** 4,
-            Type.initTag(.i128),
+            Type.i128,
             &.{ lhs_lsb, zero, rhs_msb, zero },
         );
         const mul2 = try func.callIntrinsic(
             "__multi3",
             &[_]Type{Type.i64} ** 4,
-            Type.initTag(.i128),
+            Type.i128,
             &.{ rhs_lsb, zero, lhs_msb, zero },
         );
         const mul3 = try func.callIntrinsic(
             "__multi3",
             &[_]Type{Type.i64} ** 4,
-            Type.initTag(.i128),
+            Type.i128,
             &.{ lhs_msb, zero, rhs_msb, zero },
         );
@@ -5942,7 +5933,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         _ = try func.binOp(lsb_or, mul_add_lt, Type.bool, .@"or");
         try func.addLabel(.local_set, overflow_bit.local.value);
 
-        const tmp_result = try func.allocStack(Type.initTag(.u128));
+        const tmp_result = try func.allocStack(Type.u128);
         try func.emitWValue(tmp_result);
         const mul3_msb = try func.load(mul3, Type.u64, 0);
         try func.store(.stack, mul3_msb, Type.u64, tmp_result.offset());
@@ -6191,11 +6182,12 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
     const err_union_ptr = try func.resolveInst(extra.data.ptr);
     const body = func.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = func.typeOf(extra.data.ptr).childType();
+    const err_union_ty = func.typeOf(extra.data.ptr).childType(mod);
     const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
     func.finishAir(inst, result, &.{extra.data.ptr});
 }
@@ -6845,11 +6837,11 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
     for (enum_ty.enumFields().keys(), 0..) |tag_name, field_index| {
         // for each tag name, create an unnamed const,
         // and then get a pointer to its value.
- var name_ty_payload: Type.Payload.Len = .{ - .base = .{ .tag = .array_u8_sentinel_0 }, - .data = @intCast(u64, tag_name.len), - }; - const name_ty = Type.initPayload(&name_ty_payload.base); + const name_ty = try mod.arrayType(.{ + .len = tag_name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const string_bytes = &mod.string_literal_bytes; try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len); const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{ @@ -6972,7 +6964,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // finish function body try writer.writeByte(std.wasm.opcode(.end)); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } @@ -7068,7 +7060,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr_ty = func.typeOf(extra.ptr); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const result_ty = func.typeOfIndex(inst); const ptr_operand = try func.resolveInst(extra.ptr); @@ -7355,7 +7347,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const operand = try func.resolveInst(bin_op.rhs); const ptr_ty = func.typeOf(bin_op.lhs); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); if (func.useAtomicFeature()) { const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ad67a0db3d78..f6304a0ff336 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2259,7 +2259,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { const mod = self.bin_file.options.module.?; const ptr_ty = self.typeOfIndex(inst); - const val_ty = ptr_ty.childType(); + const val_ty = ptr_ty.childType(mod); return self.allocFrameIndex(FrameAlloc.init(.{ .size = math.cast(u32, val_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)}); @@ -2289,8 +2289,8 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b 80 => break :need_mem, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { 16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16, 80 => break :need_mem, else => unreachable, @@ -2727,12 +2727,12 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); if (dst_ty.zigTypeTag(mod) == .Vector) { - assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); - const dst_info = dst_ty.childType().intInfo(mod); - const src_info = src_ty.childType().intInfo(mod); + assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod)); + const dst_info = dst_ty.childType(mod).intInfo(mod); + const src_info = src_ty.childType(mod).intInfo(mod); const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) { 8 => switch 
(src_info.bits) { - 16 => switch (dst_ty.vectorLen()) { + 16 => switch (dst_ty.vectorLen(mod)) { 1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw }, 9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null, else => null, @@ -2740,7 +2740,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else => null, }, 16 => switch (src_info.bits) { - 32 => switch (dst_ty.vectorLen()) { + 32 => switch (dst_ty.vectorLen(mod)) { 1...4 => if (self.hasFeature(.avx)) .{ .vp_w, .ackusd } else if (self.hasFeature(.sse4_1)) @@ -2769,14 +2769,10 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { }; const splat_val = Value.initPayload(&splat_pl.base); - var full_pl = Type.Payload.Array{ - .base = .{ .tag = .vector }, - .data = .{ - .len = @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits), - .elem_type = src_ty.childType(), - }, - }; - const full_ty = Type.initPayload(&full_pl.base); + const full_ty = try mod.vectorType(.{ + .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), + .child = src_ty.childType(mod).ip_index, + }); const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); @@ -3587,7 +3583,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const result = result: { const dst_ty = self.typeOfIndex(inst); const src_ty = self.typeOf(ty_op.operand); - const opt_ty = src_ty.childType(); + const opt_ty = src_ty.childType(mod); const src_mcv = try self.resolveInst(ty_op.operand); if (opt_ty.optionalReprIsPayload(mod)) { @@ -3607,7 +3603,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); - const pl_ty = dst_ty.childType(); + const pl_ty = dst_ty.childType(mod); const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); try self.genSetMem(.{ .reg = dst_mcv.getReg().? 
}, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; @@ -3737,7 +3733,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const eu_ty = src_ty.childType(); + const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); @@ -3777,7 +3773,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const eu_ty = src_ty.childType(); + const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(); const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); @@ -3803,7 +3799,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const eu_ty = src_ty.childType(); + const eu_ty = src_ty.childType(mod); const pl_ty = eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); @@ -4057,7 +4053,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { }; defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock); - const elem_ty = slice_ty.childType(); + const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -4116,7 +4112,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { }; defer if (array_lock) |lock| self.register_manager.unlockReg(lock); - const elem_ty = array_ty.childType(); + const elem_ty = array_ty.childType(mod); const elem_abi_size = elem_ty.abiSize(mod); const index_ty = self.typeOf(bin_op.rhs); @@ -4253,7 +4249,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_union_ty = self.typeOf(bin_op.lhs); - const union_ty = ptr_union_ty.childType(); + const union_ty = ptr_union_ty.childType(mod); const tag_ty = self.typeOf(bin_op.rhs); const layout = union_ty.unionGetLayout(mod); @@ -4287,7 +4283,9 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { break :blk MCValue{ .register = reg }; } else ptr; - var ptr_tag_pl = ptr_union_ty.ptrInfo(); + var ptr_tag_pl: Type.Payload.Pointer = .{ + .data = ptr_union_ty.ptrInfo(mod), + }; ptr_tag_pl.data.pointee_type = tag_ty; const ptr_tag_ty = Type.initPayload(&ptr_tag_pl.base); try self.store(ptr_tag_ty, adjusted_ptr, tag); @@ -4924,14 +4922,11 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { var stack align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - var vec_pl = Type.Payload.Array{ - .base = .{ .tag = .vector }, - .data = .{ - .len = @divExact(abi_size * 8, scalar_bits), - .elem_type = try mod.intType(.signed, scalar_bits), - }, - }; - const vec_ty = Type.initPayload(&vec_pl.base); + const vec_ty = try mod.vectorType(.{ + .len = @divExact(abi_size * 8, scalar_bits), + .child = (try mod.intType(.signed, 
scalar_bits)).ip_index, + }); + const sign_val = switch (tag) { .neg => try vec_ty.minInt(stack.get(), mod), .fabs => try vec_ty.maxInt(stack.get(), mod), @@ -5034,15 +5029,15 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, 2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round }, 5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round }, 2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round }, 3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null, @@ -5131,9 +5126,9 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(mod)) { 1 => { try self.asmRegisterRegister( .{ .v_ps, .cvtph2 }, @@ -5184,13 +5179,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { }, else => null, } else null, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt }, 2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt }, 5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt }, 2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt }, 3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null, @@ -5292,7 +5287,7 @@ fn reuseOperandAdvanced( fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { const mod = self.bin_file.options.module.?; - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const val_ty = ptr_info.pointee_type; const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); @@ -5365,7 +5360,8 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn } fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { - const dst_ty = ptr_ty.childType(); + const mod = self.bin_file.options.module.?; + const dst_ty = ptr_ty.childType(mod); switch (ptr_mcv) { .none, .unreach, @@ -5424,7 +5420,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { else try self.allocRegOrMem(inst, true); - if (ptr_ty.ptrInfo().data.host_size > 0) { + if (ptr_ty.ptrInfo(mod).host_size > 0) { try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv); } else { try self.load(dst_mcv, ptr_ty, ptr_mcv); @@ -5436,8 +5432,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) 
InnerError!void { const mod = self.bin_file.options.module.?; - const ptr_info = ptr_ty.ptrInfo().data; - const src_ty = ptr_ty.childType(); + const ptr_info = ptr_ty.ptrInfo(mod); + const src_ty = ptr_ty.childType(mod); const limb_abi_size: u16 = @min(ptr_info.host_size, 8); const limb_abi_bits = limb_abi_size * 8; @@ -5509,7 +5505,8 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In } fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void { - const src_ty = ptr_ty.childType(); + const mod = self.bin_file.options.module.?; + const src_ty = ptr_ty.childType(mod); switch (ptr_mcv) { .none, .unreach, @@ -5544,6 +5541,7 @@ fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerErr } fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { + const mod = self.bin_file.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -5553,7 +5551,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const ptr_mcv = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); const src_mcv = try self.resolveInst(bin_op.rhs); - if (ptr_ty.ptrInfo().data.host_size > 0) { + if (ptr_ty.ptrInfo(mod).host_size > 0) { try self.packedStore(ptr_ty, ptr_mcv, src_mcv); } else { try self.store(ptr_ty, ptr_mcv, src_mcv); @@ -5578,11 +5576,11 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 const mod = self.bin_file.options.module.?; const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); - const container_ty = ptr_container_ty.childType(); + const container_ty = ptr_container_ty.childType(mod); const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), .Packed => if (container_ty.zigTypeTag(mod) == .Struct and - ptr_field_ty.ptrInfo().data.host_size == 0) + ptr_field_ty.ptrInfo(mod).host_size == 0) container_ty.packedStructFieldByteOffset(index, mod) else 0, @@ -5760,7 +5758,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const inst_ty = self.typeOfIndex(inst); - const parent_ty = inst_ty.childType(); + const parent_ty = inst_ty.childType(mod); const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); const src_mcv = try self.resolveInst(extra.field_ptr); @@ -6680,10 +6678,10 @@ fn genBinOp( 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { else => null, - .Int => switch (lhs_ty.childType().intInfo(mod).bits) { - 8 => switch (lhs_ty.vectorLen()) { + .Int => switch (lhs_ty.childType(mod).intInfo(mod).bits) { + 8 => switch (lhs_ty.vectorLen(mod)) { 1...16 => switch (air_tag) { .add, .addwrap, @@ -6694,7 +6692,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .mins } else if (self.hasFeature(.sse4_1)) @@ -6708,7 +6706,7 @@ fn genBinOp( else null, }, - .max => switch 
(lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6734,11 +6732,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null, }, @@ -6746,7 +6744,7 @@ fn genBinOp( }, else => null, }, - 16 => switch (lhs_ty.vectorLen()) { + 16 => switch (lhs_ty.vectorLen(mod)) { 1...8 => switch (air_tag) { .add, .addwrap, @@ -6760,7 +6758,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .mins } else @@ -6770,7 +6768,7 @@ fn genBinOp( else .{ .p_w, .minu }, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .maxs } else @@ -6795,11 +6793,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null, }, @@ -6807,7 +6805,7 @@ fn genBinOp( }, else => null, }, - 32 => switch (lhs_ty.vectorLen()) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => switch (air_tag) { .add, .addwrap, @@ -6826,7 +6824,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .mins } else if (self.hasFeature(.sse4_1)) @@ -6840,7 +6838,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, 
.maxs } else if (self.hasFeature(.sse4_1)) @@ -6869,11 +6867,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null, }, @@ -6881,7 +6879,7 @@ fn genBinOp( }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => switch (air_tag) { .add, .addwrap, @@ -6910,8 +6908,8 @@ fn genBinOp( }, else => null, }, - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen()) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen(mod)) { 1 => { const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128(); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); @@ -7086,7 +7084,7 @@ fn genBinOp( }, else => null, } else null, - 32 => switch (lhs_ty.vectorLen()) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => switch (air_tag) { .add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add }, .sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub }, @@ -7124,7 +7122,7 @@ fn genBinOp( } else null, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => switch (air_tag) { .add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add }, .sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub }, @@ -7236,14 +7234,14 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ .v_ss, .cmp }, 2...8 => .{ .v_ps, .cmp }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ .v_sd, .cmp }, 2...4 => .{ .v_pd, .cmp }, else => null, @@ -7270,13 +7268,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...8 => .{ .v_ps, .blendv }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ .v_pd, .blendv }, else => null, }, @@ -7304,14 +7302,14 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch 
(lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ ._ss, .cmp }, 2...4 => .{ ._ps, .cmp }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ ._sd, .cmp }, 2 => .{ ._pd, .cmp }, else => null, @@ -7337,13 +7335,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .blendv }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .blendv }, else => null, }, @@ -7368,13 +7366,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"and" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"and" }, else => null, }, @@ -7398,13 +7396,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .andn }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .andn }, else => null, }, @@ -7428,13 +7426,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"or" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"or" }, else => null, }, @@ -7586,11 +7584,7 @@ fn genBinOpMir( .load_got, .load_tlv, => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); + const ptr_ty = try mod.singleConstPtrType(ty); const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address()); return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg }, @@ -8058,7 +8052,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -8506,10 +8500,11 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { } fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { + 
const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(mod); const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8683,8 +8678,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - var pl_buf: Type.Payload.ElemType = undefined; - const pl_ty = opt_ty.optionalChild(&pl_buf); + const pl_ty = opt_ty.optionalChild(mod); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) @@ -8775,9 +8769,8 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - const opt_ty = ptr_ty.childType(); - var pl_buf: Type.Payload.ElemType = undefined; - const pl_ty = opt_ty.optionalChild(&pl_buf); + const opt_ty = ptr_ty.childType(mod); + const pl_ty = opt_ty.optionalChild(mod); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) @@ -8919,6 +8912,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_ptr = try self.resolveInst(un_op); @@ -8939,7 +8933,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); - const result = try self.isErr(inst, ptr_ty.childType(), operand); + const result = try self.isErr(inst, ptr_ty.childType(mod), operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8953,6 +8947,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_ptr = try self.resolveInst(un_op); @@ -8973,7 +8968,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); - const result = try self.isNonErr(inst, ptr_ty.childType(), operand); + const result = try self.isNonErr(inst, ptr_ty.childType(mod), operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -9452,9 +9447,9 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, else => {}, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Int => switch (ty.childType().intInfo(mod).bits) { - 8 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Int => switch (ty.childType(mod).intInfo(mod).bits) { + 8 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ .insert = .{ .vp_b, .insr }, .extract = .{ .vp_b, .extr }, @@ -9484,7 +9479,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } 
}, else => {}, }, - 16 => switch (ty.vectorLen()) { + 16 => switch (ty.vectorLen(mod)) { 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, @@ -9507,7 +9502,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_d, .mov } else @@ -9523,7 +9518,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_q, .mov } else @@ -9535,7 +9530,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 128 => switch (ty.vectorLen()) { + 128 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, @@ -9543,15 +9538,15 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 256 => switch (ty.vectorLen()) { + 256 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, else => {}, }, - .Float => switch (ty.childType().floatBits(self.target.*)) { - 16 => switch (ty.vectorLen()) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 16 => switch (ty.vectorLen(mod)) { 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, @@ -9574,7 +9569,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_ss, .mov } else @@ -9590,7 +9585,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, else => {}, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_sd, .mov } else @@ -9602,7 +9597,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu } }, else => {}, }, - 128 => switch (ty.vectorLen()) { + 128 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, @@ -10248,8 +10243,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.typeOfIndex(inst); const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = array_ty.arrayLen(); + const array_ty = ptr_ty.childType(mod); + const array_len = array_ty.arrayLen(mod); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try 
self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); @@ -10790,16 +10785,16 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); if (elem_abi_size == 1) { - const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) { + const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { // TODO: this only handles slices stored in the stack .Slice => dst_ptr, .One => dst_ptr, .C, .Many => unreachable, }; - const len: MCValue = switch (dst_ptr_ty.ptrSize()) { + const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { // TODO: this only handles slices stored in the stack .Slice => dst_ptr.address().offset(8).deref(), - .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() }, + .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) }, .C, .Many => unreachable, }; const len_lock: ?RegisterLock = switch (len) { @@ -10815,7 +10810,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // Store the first element, and then rely on memcpy copying forwards. // Length zero requires a runtime check - so we handle arrays specially // here to elide it. - switch (dst_ptr_ty.ptrSize()) { + switch (dst_ptr_ty.ptrSize(mod)) { .Slice => { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf); @@ -10858,13 +10853,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { try self.performReloc(skip_reloc); }, .One => { - var elem_ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_mut_pointer }, - .data = elem_ty, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_pl.base); + const elem_ptr_ty = try mod.singleMutPtrType(elem_ty); - const len = dst_ptr_ty.childType().arrayLen(); + const len = dst_ptr_ty.childType(mod).arrayLen(mod); assert(len != 0); // prevented by Sema try self.store(elem_ptr_ty, dst_ptr, src_val); @@ -10889,6 +10880,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { } fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); @@ -10906,9 +10898,9 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { }; defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock); - const len: MCValue = switch (dst_ptr_ty.ptrSize()) { + const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { .Slice => dst_ptr.address().offset(8).deref(), - .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() }, + .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) }, .C, .Many => unreachable, }; const len_lock: ?RegisterLock = switch (len) { @@ -11059,7 +11051,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { switch (scalar_ty.zigTypeTag(mod)) { else => {}, .Float => switch (scalar_ty.floatBits(self.target.*)) { - 32 => switch (vector_ty.vectorLen()) { + 32 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11139,7 +11131,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { }, else => {}, }, - 64 => switch (vector_ty.vectorLen()) { + 64 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11205,7 +11197,7 @@ fn airSplat(self: *Self, inst: 
Air.Inst.Index) !void { }, else => {}, }, - 128 => switch (vector_ty.vectorLen()) { + 128 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11271,7 +11263,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const len = @intCast(usize, result_ty.arrayLen(mod)); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { @@ -11375,7 +11367,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .Array => { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); - const elem_ty = result_ty.childType(); + const elem_ty = result_ty.childType(mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); for (elements, 0..) |elem, elem_i| { @@ -11387,7 +11379,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_off = @intCast(i32, elem_size * elem_i); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } - if (result_ty.sentinel()) |sentinel| try self.genSetMem( + if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, elem_size * elements.len), elem_ty, @@ -11512,14 +11504,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd132 }, 2...8 => .{ .v_ps, .fmadd132 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd132 }, 2...4 => .{ .v_pd, .fmadd132 }, else => null, @@ -11539,14 +11531,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd213 }, 2...8 => .{ .v_ps, .fmadd213 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd213 }, 2...4 => .{ .v_pd, .fmadd213 }, else => null, @@ -11566,14 +11558,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd231 }, 2...8 => .{ .v_ps, .fmadd231 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd231 }, 2...4 => .{ .v_pd, .fmadd231 }, 
else => null, diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index c8d20c73fa25..ea75a1f4d2ff 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -76,7 +76,7 @@ pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class { }; var result = [1]Class{.none} ** 8; switch (ty.zigTypeTag(mod)) { - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice => { result[0] = .integer; result[1] = .integer; @@ -158,8 +158,8 @@ pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); - const bits = elem_ty.bitSize(mod) * ty.arrayLen(); + const elem_ty = ty.childType(mod); + const bits = elem_ty.bitSize(mod) * ty.arrayLen(mod); if (bits <= 64) return .{ .sse, .none, .none, .none, .none, .none, .none, .none, diff --git a/src/codegen.zig b/src/codegen.zig index c9e2c6c265da..a80740050233 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -230,7 +230,7 @@ pub fn generateSymbol( .Array => switch (typed_value.val.tag()) { .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel()); + const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); // The bytes payload already includes the sentinel, if any try code.ensureUnusedCapacity(len); code.appendSliceAssumeCapacity(bytes[0..len]); @@ -241,7 +241,7 @@ pub fn generateSymbol( const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; try code.ensureUnusedCapacity(bytes.len + 1); code.appendSliceAssumeCapacity(bytes); - if (typed_value.ty.sentinel()) |sent_val| { + if (typed_value.ty.sentinel(mod)) |sent_val| { const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); code.appendAssumeCapacity(byte); } @@ -249,8 +249,8 @@ pub fn generateSymbol( }, .aggregate => { const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.elemType(); - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel()); + const elem_ty = typed_value.ty.childType(mod); + const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); for (elem_vals[0..len]) |elem_val| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = elem_ty, @@ -264,9 +264,9 @@ pub fn generateSymbol( }, .repeated => { const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(); - const sentinel = typed_value.ty.sentinel(); - const len = typed_value.ty.arrayLen(); + const elem_ty = typed_value.ty.childType(mod); + const sentinel = typed_value.ty.sentinel(mod); + const len = typed_value.ty.arrayLen(mod); var index: u64 = 0; while (index < len) : (index += 1) { @@ -292,8 +292,8 @@ pub fn generateSymbol( return Result.ok; }, .empty_array_sentinel => { - const elem_ty = typed_value.ty.childType(); - const sentinel_val = typed_value.ty.sentinel().?; + const elem_ty = typed_value.ty.childType(mod); + const sentinel_val = typed_value.ty.sentinel(mod).?; switch (try generateSymbol(bin_file, src_loc, .{ .ty = elem_ty, .val = sentinel_val, @@ -618,8 +618,7 @@ pub fn generateSymbol( return Result.ok; }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_type = typed_value.ty.optionalChild(&opt_buf); + const payload_type = typed_value.ty.optionalChild(mod); const is_pl = !typed_value.val.isNull(mod); const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; @@ -751,7 +750,7 @@ pub 
fn generateSymbol( .Vector => switch (typed_value.val.tag()) { .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; + const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse return error.Overflow; try code.ensureUnusedCapacity(len + padding); @@ -761,8 +760,8 @@ pub fn generateSymbol( }, .aggregate => { const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.elemType(); - const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; + const elem_ty = typed_value.ty.childType(mod); + const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; const padding = math.cast(usize, typed_value.ty.abiSize(mod) - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, @@ -782,8 +781,8 @@ pub fn generateSymbol( }, .repeated => { const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(); - const len = typed_value.ty.arrayLen(); + const elem_ty = typed_value.ty.childType(mod); + const len = typed_value.ty.arrayLen(mod); const padding = math.cast(usize, typed_value.ty.abiSize(mod) - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, @@ -1188,7 +1187,7 @@ pub fn genTypedValue( switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), - .Pointer => switch (typed_value.ty.ptrSize()) { + .Pointer => switch (typed_value.ty.ptrSize(mod)) { .Slice => {}, else => { switch (typed_value.val.tag()) { @@ -1219,9 +1218,8 @@ pub fn genTypedValue( if (typed_value.ty.isPtrLikeOptional(mod)) { if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); - var buf: Type.Payload.ElemType = undefined; return genTypedValue(bin_file, src_loc, .{ - .ty = typed_value.ty.optionalChild(&buf), + .ty = typed_value.ty.optionalChild(mod), .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, }, owner_decl_index); } else if (typed_value.ty.abiSize(mod) == 1) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index cd4f36e5740d..e6ec461e4311 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -625,7 +625,9 @@ pub const DeclGen = struct { // Ensure complete type definition is visible before accessing fields. 
_ = try dg.typeToIndex(field_ptr.container_ty, .complete); - var container_ptr_pl = ptr_ty.ptrInfo(); + var container_ptr_pl: Type.Payload.Pointer = .{ + .data = ptr_ty.ptrInfo(mod), + }; container_ptr_pl.data.pointee_type = field_ptr.container_ty; const container_ptr_ty = Type.initPayload(&container_ptr_pl.base); @@ -653,7 +655,9 @@ pub const DeclGen = struct { try dg.writeCValue(writer, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = ptr_ty.ptrInfo(); + var u8_ptr_pl: Type.Payload.Pointer = .{ + .data = ptr_ty.ptrInfo(mod), + }; u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); @@ -692,11 +696,10 @@ pub const DeclGen = struct { }, .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var elem_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = elem_ptr.elem_ty, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base); + const elem_ptr_ty = try mod.ptrType(.{ + .size = .C, + .elem_type = elem_ptr.elem_ty.ip_index, + }); try writer.writeAll("&("); try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location); @@ -704,11 +707,10 @@ pub const DeclGen = struct { }, .opt_payload_ptr, .eu_payload_ptr => { const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - var container_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = payload_ptr.container_ty, - }; - const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base); + const container_ptr_ty = try mod.ptrType(.{ + .elem_type = payload_ptr.container_ty.ip_index, + .size = .C, + }); // Ensure complete type definition is visible before accessing fields. _ = try dg.typeToIndex(payload_ptr.container_ty, .complete); @@ -794,8 +796,7 @@ pub const DeclGen = struct { return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); + const payload_ty = ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, Type.bool, val, location); @@ -889,11 +890,11 @@ pub const DeclGen = struct { return writer.writeAll(" }"); }, .Array, .Vector => { - const ai = ty.arrayInfo(); + const ai = ty.arrayInfo(mod); if (ai.elem_type.eql(Type.u8, dg.module)) { var literal = stringLiteral(writer); try literal.start(); - const c_len = ty.arrayLenIncludingSentinel(); + const c_len = ty.arrayLenIncludingSentinel(mod); var index: u64 = 0; while (index < c_len) : (index += 1) try literal.writeChar(0xaa); @@ -906,11 +907,11 @@ pub const DeclGen = struct { } try writer.writeByte('{'); - const c_len = ty.arrayLenIncludingSentinel(); + const c_len = ty.arrayLenIncludingSentinel(mod); var index: u64 = 0; while (index < c_len) : (index += 1) { if (index > 0) try writer.writeAll(", "); - try dg.renderValue(writer, ty.childType(), val, initializer_type); + try dg.renderValue(writer, ty.childType(mod), val, initializer_type); } return writer.writeByte('}'); } @@ -1110,7 +1111,7 @@ pub const DeclGen = struct { // First try specific tag representations for more efficiency. 
switch (val.tag()) { .undef, .empty_struct_value, .empty_array => { - const ai = ty.arrayInfo(); + const ai = ty.arrayInfo(mod); try writer.writeByte('{'); if (ai.sentinel) |s| { try dg.renderValue(writer, ai.elem_type, s, initializer_type); @@ -1128,9 +1129,9 @@ pub const DeclGen = struct { }, else => unreachable, }; - const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; + const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; try writer.print("{s}", .{ - fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel), + fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), }); }, else => { @@ -1142,7 +1143,7 @@ pub const DeclGen = struct { // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal const max_string_initializer_len = 65535; - const ai = ty.arrayInfo(); + const ai = ty.arrayInfo(mod); if (ai.elem_type.eql(Type.u8, dg.module)) { if (ai.len <= max_string_initializer_len) { var literal = stringLiteral(writer); @@ -1198,8 +1199,7 @@ pub const DeclGen = struct { } }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); + const payload_ty = ty.optionalChild(mod); const is_null_val = Value.makeBool(val.tag() == .null_value); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) @@ -2410,12 +2410,13 @@ pub fn genGlobalAsm(mod: *Module, writer: anytype) !void { } pub fn genErrDecls(o: *Object) !void { + const mod = o.dg.module; const writer = o.writer(); try writer.writeAll("enum {\n"); o.indent_writer.pushIndent(); var max_name_len: usize = 0; - for (o.dg.module.error_name_list.items, 0..) |name, value| { + for (mod.error_name_list.items, 0..) |name, value| { max_name_len = std.math.max(name.len, max_name_len); var err_pl = Value.Payload.Error{ .data = .{ .name = name } }; try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other); @@ -2430,12 +2431,15 @@ pub fn genErrDecls(o: *Object) !void { defer o.dg.gpa.free(name_buf); @memcpy(name_buf[0..name_prefix.len], name_prefix); - for (o.dg.module.error_name_list.items) |name| { + for (mod.error_name_list.items) |name| { @memcpy(name_buf[name_prefix.len..][0..name.len], name); const identifier = name_buf[0 .. name_prefix.len + name.len]; - var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len }; - const name_ty = Type.initPayload(&name_ty_pl.base); + const name_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; const name_val = Value.initPayload(&name_pl.base); @@ -2448,15 +2452,15 @@ pub fn genErrDecls(o: *Object) !void { } var name_array_ty_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{ - .len = o.dg.module.error_name_list.items.len, - .elem_type = Type.initTag(.const_slice_u8_sentinel_0), + .len = mod.error_name_list.items.len, + .elem_type = Type.const_slice_u8_sentinel_0, } }; const name_array_ty = Type.initPayload(&name_array_ty_pl.base); try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete); try writer.writeAll(" = {"); - for (o.dg.module.error_name_list.items, 0..) |name, value| { + for (mod.error_name_list.items, 0..) 
|name, value| { if (value != 0) try writer.writeByte(','); var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; @@ -2487,6 +2491,7 @@ fn genExports(o: *Object) !void { } pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { + const mod = o.dg.module; const w = o.writer(); const key = lazy_fn.key_ptr.*; const val = lazy_fn.value_ptr; @@ -2495,7 +2500,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { .tag_name => { const enum_ty = val.data.tag_name; - const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const name_slice_ty = Type.const_slice_u8_sentinel_0; try w.writeAll("static "); try o.dg.renderType(w, name_slice_ty); @@ -2514,11 +2519,11 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { var int_pl: Value.Payload.U64 = undefined; const int_val = tag_val.enumToInt(enum_ty, &int_pl); - var name_ty_pl = Type.Payload.Len{ - .base = .{ .tag = .array_u8_sentinel_0 }, - .data = name.len, - }; - const name_ty = Type.initPayload(&name_ty_pl.base); + const name_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; const name_val = Value.initPayload(&name_pl.base); @@ -2547,7 +2552,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeAll("}\n"); }, .never_tail, .never_inline => |fn_decl_index| { - const fn_decl = o.dg.module.declPtr(fn_decl_index); + const fn_decl = mod.declPtr(fn_decl_index); const fn_cty = try o.dg.typeToCType(fn_decl.ty, .complete); const fn_info = fn_cty.cast(CType.Payload.Function).?.data; @@ -3150,7 +3155,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const ptr = try f.resolveInst(bin_op.lhs); @@ -3166,7 +3171,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, inst_ty); try writer.writeByte(')'); if (elem_has_bits) try writer.writeByte('&'); - if (elem_has_bits and ptr_ty.ptrSize() == .One) { + if (elem_has_bits and ptr_ty.ptrSize(mod) == .One) { // It's a pointer to an array, so we need to de-reference. 
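Where the old code faked one-off types with stack payload buffers, `genErrDecls` and `genLazyFn` now ask the Module to intern them, which deduplicates the type and lets it outlive the current frame. The two constructors used above, condensed (`.u8_type` and `.zero_u8` are pre-interned InternPool indexes):

```zig
// `[len:0]u8`, interned instead of built in a Type.Payload.Len buffer.
const name_ty = try mod.arrayType(.{
    .len = name.len,
    .child = .u8_type,
    .sentinel = .zero_u8,
});

// A C pointer to elem_ty, interned instead of a c_mut_pointer payload.
const elem_ptr_ty = try mod.ptrType(.{
    .size = .C,
    .elem_type = elem_ty.ip_index,
});
```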
try f.writeCValueDeref(writer, ptr); } else try f.writeCValue(writer, ptr, .Other); @@ -3264,7 +3269,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const inst_ty = f.typeOfIndex(inst); - const elem_type = inst_ty.elemType(); + const elem_type = inst_ty.childType(mod); if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue( @@ -3280,7 +3285,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const inst_ty = f.typeOfIndex(inst); - const elem_ty = inst_ty.elemType(); + const elem_ty = inst_ty.childType(mod); if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue( @@ -3323,7 +3328,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ptr_ty = f.typeOf(ty_op.operand); const ptr_scalar_ty = ptr_ty.scalarType(mod); - const ptr_info = ptr_scalar_ty.ptrInfo().data; + const ptr_info = ptr_scalar_ty.ptrInfo(mod); const src_ty = ptr_info.pointee_type; if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -3412,7 +3417,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const writer = f.object.writer(); const op_inst = Air.refToIndex(un_op); const op_ty = f.typeOf(un_op); - const ret_ty = if (is_ptr) op_ty.childType() else op_ty; + const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty; var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); @@ -3601,7 +3606,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_ty = f.typeOf(bin_op.lhs); const ptr_scalar_ty = ptr_ty.scalarType(mod); - const ptr_info = ptr_scalar_ty.ptrInfo().data; + const ptr_info = ptr_scalar_ty.ptrInfo(mod); const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); @@ -4156,7 +4161,7 @@ fn airCall( const callee_ty = f.typeOf(pl_op.operand); const fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(), + .Pointer => callee_ty.childType(mod), else => unreachable, }; @@ -4331,10 +4336,11 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.TryPtr, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.typeOf(extra.data.ptr).childType(); + const err_union_ty = f.typeOf(extra.data.ptr).childType(mod); return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true); } @@ -4826,7 +4832,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { - const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(); + const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod); try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(output_ty, alignment); @@ -5061,9 +5067,8 @@ fn airIsNull( } const operand_ty = f.typeOf(un_op); - const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty; - var payload_buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&payload_buf); + const 
optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; + const payload_ty = optional_ty.optionalChild(mod); var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) @@ -5097,8 +5102,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const opt_ty = f.typeOf(ty_op.operand); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); + const payload_ty = opt_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return .none; @@ -5132,10 +5136,10 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const ptr_ty = f.typeOf(ty_op.operand); - const opt_ty = ptr_ty.childType(); + const opt_ty = ptr_ty.childType(mod); const inst_ty = f.typeOfIndex(inst); - if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) { + if (!inst_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod)) { return .{ .undef = inst_ty }; } @@ -5163,7 +5167,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const operand_ty = f.typeOf(ty_op.operand); - const opt_ty = operand_ty.elemType(); + const opt_ty = operand_ty.childType(mod); const inst_ty = f.typeOfIndex(inst); @@ -5221,7 +5225,7 @@ fn fieldLocation( else .{ .identifier = container_ty.structFieldName(next_field_index) } }; } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin, - .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0) + .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0) .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } else .begin, @@ -5243,7 +5247,7 @@ fn fieldLocation( }, .Packed => .begin, }, - .Pointer => switch (container_ty.ptrSize()) { + .Pointer => switch (container_ty.ptrSize(mod)) { .Slice => switch (field_index) { 0 => .{ .field = .{ .identifier = "ptr" } }, 1 => .{ .field = .{ .identifier = "len" } }, @@ -5280,7 +5284,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const container_ptr_ty = f.typeOfIndex(inst); - const container_ty = container_ptr_ty.childType(); + const container_ty = container_ptr_ty.childType(mod); const field_ptr_ty = f.typeOf(extra.field_ptr); const field_ptr_val = try f.resolveInst(extra.field_ptr); @@ -5296,7 +5300,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); + var u8_ptr_pl: Type.Payload.Pointer = .{ + .data = field_ptr_ty.ptrInfo(mod), + }; u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); @@ -5311,7 +5317,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); + var u8_ptr_pl: Type.Payload.Pointer = .{ + .data = field_ptr_ty.ptrInfo(mod), + }; u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); @@ -5345,7 +5353,7 @@ fn fieldPtr( field_index: u32, ) !CValue { const mod = f.object.dg.module; - const container_ty = container_ptr_ty.elemType(); + const container_ty = 
container_ptr_ty.childType(mod); const field_ptr_ty = f.typeOfIndex(inst); // Ensure complete type definition is visible before accessing fields. @@ -5365,7 +5373,9 @@ fn fieldPtr( try f.writeCValueDerefMember(writer, container_ptr_val, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl = field_ptr_ty.ptrInfo(); + var u8_ptr_pl: Type.Payload.Pointer = .{ + .data = field_ptr_ty.ptrInfo(mod), + }; u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); @@ -5532,7 +5542,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; - const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const local = try f.allocLocal(inst, inst_ty); @@ -5569,7 +5579,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const operand_ty = f.typeOf(ty_op.operand); - const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; + const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const writer = f.object.writer(); if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) { @@ -5673,7 +5683,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand = try f.resolveInst(ty_op.operand); - const error_union_ty = f.typeOf(ty_op.operand).childType(); + const error_union_ty = f.typeOf(ty_op.operand).childType(mod); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); @@ -5761,7 +5771,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const try reap(f, inst, &.{un_op}); const operand_ty = f.typeOf(un_op); const local = try f.allocLocal(inst, Type.bool); - const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(); const error_ty = err_union_ty.errorUnionSet(); @@ -5795,7 +5805,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); - const array_ty = f.typeOf(ty_op.operand).childType(); + const array_ty = f.typeOf(ty_op.operand).childType(mod); try f.writeCValueMember(writer, local, .{ .identifier = "ptr" }); try writer.writeAll(" = "); @@ -5811,7 +5821,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); - const array_len = array_ty.arrayLen(); + const array_len = array_ty.arrayLen(mod); var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len }; const len_val = Value.initPayload(&len_pl.base); try f.writeCValueMember(writer, local, .{ .identifier = "len" }); @@ -6050,7 +6060,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const expected_value = try f.resolveInst(extra.expected_value); const new_value = try f.resolveInst(extra.new_value); const ptr_ty = 
f.typeOf(extra.ptr); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const writer = f.object.writer(); const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value); @@ -6152,7 +6162,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(pl_op.operand); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const ptr = try f.resolveInst(pl_op.operand); const operand = try f.resolveInst(extra.operand); @@ -6207,7 +6217,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const ptr = try f.resolveInst(atomic_load.ptr); try reap(f, inst, &.{atomic_load.ptr}); const ptr_ty = f.typeOf(atomic_load.ptr); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const repr_ty = if (ty.isRuntimeFloat()) mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable @@ -6241,7 +6251,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const ptr_ty = f.typeOf(bin_op.lhs); - const ty = ptr_ty.childType(); + const ty = ptr_ty.childType(mod); const ptr = try f.resolveInst(bin_op.lhs); const element = try f.resolveInst(bin_op.rhs); @@ -6299,7 +6309,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } try writer.writeAll("memset("); - switch (dest_ty.ptrSize()) { + switch (dest_ty.ptrSize(mod)) { .Slice => { try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" }); try writer.writeAll(", 0xaa, "); @@ -6311,8 +6321,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } }, .One => { - const array_ty = dest_ty.childType(); - const len = array_ty.arrayLen() * elem_abi_size; + const array_ty = dest_ty.childType(mod); + const len = array_ty.arrayLen(mod) * elem_abi_size; try f.writeCValue(writer, dest_slice, .FunctionArgument); try writer.print(", 0xaa, {d});\n", .{len}); @@ -6327,11 +6337,10 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { // For the assignment in this loop, the array pointer needs to get // casted to a regular pointer, otherwise an error like this occurs: // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable - var elem_ptr_ty_pl: Type.Payload.ElemType = .{ - .base = .{ .tag = .c_mut_pointer }, - .data = elem_ty, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base); + const elem_ptr_ty = try mod.ptrType(.{ + .size = .C, + .elem_type = elem_ty.ip_index, + }); const index = try f.allocLocal(inst, Type.usize); @@ -6342,13 +6351,13 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); - switch (dest_ty.ptrSize()) { + switch (dest_ty.ptrSize(mod)) { .Slice => { try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" }); }, .One => { - const array_ty = dest_ty.childType(); - try writer.print("{d}", .{array_ty.arrayLen()}); + const array_ty = dest_ty.childType(mod); + try writer.print("{d}", .{array_ty.arrayLen(mod)}); }, .Many, .C => unreachable, } @@ -6377,7 +6386,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const bitcasted = try bitcast(f, Type.u8, value, elem_ty); try writer.writeAll("memset("); - switch (dest_ty.ptrSize()) { + switch (dest_ty.ptrSize(mod)) { 
.Slice => { try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" }); try writer.writeAll(", "); @@ -6387,8 +6396,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll(");\n"); }, .One => { - const array_ty = dest_ty.childType(); - const len = array_ty.arrayLen() * elem_abi_size; + const array_ty = dest_ty.childType(mod); + const len = array_ty.arrayLen(mod) * elem_abi_size; try f.writeCValue(writer, dest_slice, .FunctionArgument); try writer.writeAll(", "); @@ -6416,9 +6425,9 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", "); try writeSliceOrPtr(f, writer, src_ptr, src_ty); try writer.writeAll(", "); - switch (dest_ty.ptrSize()) { + switch (dest_ty.ptrSize(mod)) { .Slice => { - const elem_ty = dest_ty.childType(); + const elem_ty = dest_ty.childType(mod); const elem_abi_size = elem_ty.abiSize(mod); try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" }); if (elem_abi_size > 1) { @@ -6428,10 +6437,10 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { } }, .One => { - const array_ty = dest_ty.childType(); - const elem_ty = array_ty.childType(); + const array_ty = dest_ty.childType(mod); + const elem_ty = array_ty.childType(mod); const elem_abi_size = elem_ty.abiSize(mod); - const len = array_ty.arrayLen() * elem_abi_size; + const len = array_ty.arrayLen(mod) * elem_abi_size; try writer.print("{d});\n", .{len}); }, .Many, .C => unreachable, @@ -6448,7 +6457,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const new_tag = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const union_ty = f.typeOf(bin_op.lhs).childType(); + const union_ty = f.typeOf(bin_op.lhs).childType(mod); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; const tag_ty = union_ty.unionTagTypeSafety().?; @@ -6777,7 +6786,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const inst_ty = f.typeOfIndex(inst); - const len = @intCast(usize, inst_ty.arrayLen()); + const len = @intCast(usize, inst_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]); const gpa = f.object.dg.gpa; const resolved_elements = try gpa.alloc(CValue, elements.len); @@ -6796,7 +6805,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => { - const elem_ty = inst_ty.childType(); + const elem_ty = inst_ty.childType(mod); const a = try Assignment.init(f, elem_ty); for (resolved_elements, 0..) 
|element, i| { try a.restart(f, writer); @@ -6806,7 +6815,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, element, .Other); try a.end(f, writer); } - if (inst_ty.sentinel()) |sentinel| { + if (inst_ty.sentinel(mod)) |sentinel| { try a.restart(f, writer); try f.writeCValue(writer, local, .Other); try writer.print("[{d}]", .{resolved_elements.len}); @@ -7708,7 +7717,7 @@ const Vectorize = struct { pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { const mod = f.object.dg.module; return if (ty.zigTypeTag(mod) == .Vector) index: { - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() }; + var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen(mod) }; const local = try f.allocLocal(inst, Type.usize); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 27fa997fd34a..9e6de6cb21ff 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1423,7 +1423,7 @@ pub const CType = extern union { }), .Pointer => { - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); switch (info.size) { .Slice => { if (switch (kind) { @@ -1625,9 +1625,9 @@ pub const CType = extern union { .Vector => .vector, else => unreachable, }; - if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| { + if (try lookup.typeToIndex(ty.childType(mod), kind)) |child_idx| { self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{ - .len = ty.arrayLenIncludingSentinel(), + .len = ty.arrayLenIncludingSentinel(mod), .elem_type = child_idx, } } }; self.value = .{ .cty = initPayload(&self.storage.seq) }; @@ -1639,8 +1639,7 @@ pub const CType = extern union { }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (ty.optionalReprIsPayload(mod)) { try self.initType(payload_ty, kind, lookup); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5d9345c84f00..f45a63df7271 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -597,7 +597,7 @@ pub const Object = struct { llvm_usize_ty, }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; @@ -1071,7 +1071,7 @@ pub const Object = struct { .slice => { assert(!it.byval_attr); const param_ty = fn_info.param_types[it.zig_index - 1]; - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { @@ -1596,7 +1596,7 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); if (ptr_info.sentinel != null or ptr_info.@"addrspace" != .generic or @@ -1755,8 +1755,8 @@ pub const Object = struct { const array_di_ty = dib.createArrayType( ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, - try o.lowerDebugType(ty.childType(), .full), - @intCast(c_int, ty.arrayLen()), + try o.lowerDebugType(ty.childType(mod), .full), + @intCast(c_int, ty.arrayLen(mod)), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
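The llvm.zig hunk above swaps `Type.initTag(.const_slice_u8_sentinel_0)` for the new `Type.const_slice_u8_sentinel_0` constant: frequently used types are now pre-interned at fixed InternPool indexes, so there is nothing left to construct at the call site. A sketch of the new shape:

```zig
// Was: const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
const slice_ty = Type.const_slice_u8_sentinel_0;
const slice_alignment = slice_ty.abiAlignment(mod); // queries still thread mod
```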
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module }); @@ -1781,14 +1781,14 @@ pub const Object = struct { break :blk dib.createBasicType(name, info.bits, dwarf_encoding); }, .Bool => dib.createBasicType("bool", 1, DW.ATE.boolean), - else => try o.lowerDebugType(ty.childType(), .full), + else => try o.lowerDebugType(ty.childType(mod), .full), }; const vector_di_ty = dib.createVectorType( ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, elem_di_type, - ty.vectorLen(), + ty.vectorLen(mod), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module }); @@ -1797,8 +1797,7 @@ pub const Object = struct { .Optional => { const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { const di_bits = 8; // lldb cannot handle non-byte sized types const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean); @@ -2350,11 +2349,7 @@ pub const Object = struct { try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); if (sret) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = fn_info.return_type, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(fn_info.return_type); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } } else { @@ -2364,11 +2359,7 @@ pub const Object = struct { if (fn_info.return_type.isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = o.getStackTraceType(), - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } @@ -2376,11 +2367,7 @@ pub const Object = struct { if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (isByRef(param_ty, mod)) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = param_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(param_ty); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } else { try param_di_types.append(try o.lowerDebugType(param_ty, .full)); @@ -2843,7 +2830,7 @@ pub const DeclGen = struct { }; return dg.context.structType(&fields, fields.len, .False); } - const ptr_info = t.ptrInfo().data; + const ptr_info = t.ptrInfo(mod); const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target); return dg.context.pointerType(llvm_addrspace); }, @@ -2866,19 +2853,18 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Array => { - const elem_ty = t.childType(); + const elem_ty = t.childType(mod); assert(elem_ty.onePossibleValue(mod) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); - const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); + const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); }, .Vector => { - const elem_type = try dg.lowerType(t.childType()); - return elem_type.vectorType(t.vectorLen()); + const elem_type = try dg.lowerType(t.childType(mod)); + return 
elem_type.vectorType(t.vectorLen(mod)); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&buf); + const child_ty = t.optionalChild(mod); if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.context.intType(8); } @@ -3173,11 +3159,7 @@ pub const DeclGen = struct { if (fn_info.return_type.isError(mod) and mod.comp.bin_file.options.error_return_tracing) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = dg.object.getStackTraceType(), - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType()); try llvm_params.append(try dg.lowerType(ptr_ty)); } @@ -3199,9 +3181,8 @@ pub const DeclGen = struct { .slice => { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; - var opt_buf: Type.Payload.ElemType = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf) + param_ty.optionalChild(mod).slicePtrFieldType(&buf) else param_ty.slicePtrFieldType(&buf); const ptr_llvm_ty = try dg.lowerType(ptr_ty); @@ -3247,7 +3228,7 @@ pub const DeclGen = struct { const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, .Fn => !elem_ty.fnInfo().is_generic, - .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(mod), + .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; const llvm_elem_ty = if (lower_elem_ty) @@ -3417,7 +3398,7 @@ pub const DeclGen = struct { return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); }, .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo().data.bit_offset % 8 == 0); + return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); }, .null_value, .zero => { const llvm_type = try dg.lowerType(tv.ty); @@ -3425,7 +3406,7 @@ pub const DeclGen = struct { }, .opt_payload => { const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo().data.bit_offset % 8 == 0); + return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); }, else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty.fmtDebug(), tag, @@ -3436,14 +3417,14 @@ pub const DeclGen = struct { const bytes = tv.val.castTag(.bytes).?.data; return dg.context.constString( bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel()), + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), .True, // Don't null terminate. Bytes has the sentinel, if any. 
); }, .str_lit => { const str_lit = tv.val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel()) |sent_val| { + if (tv.ty.sentinel(mod)) |sent_val| { const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); if (byte == 0 and bytes.len > 0) { return dg.context.constString( @@ -3472,9 +3453,9 @@ pub const DeclGen = struct { }, .aggregate => { const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel()); + const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod)); const llvm_elems = try gpa.alloc(*llvm.Value, len); defer gpa.free(llvm_elems); var need_unnamed = false; @@ -3498,9 +3479,9 @@ pub const DeclGen = struct { }, .repeated => { const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const sentinel = tv.ty.sentinel(); - const len = @intCast(usize, tv.ty.arrayLen()); + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); const len_including_sent = len + @boolToInt(sentinel != null); const gpa = dg.gpa; const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); @@ -3534,8 +3515,8 @@ pub const DeclGen = struct { } }, .empty_array_sentinel => { - const elem_ty = tv.ty.elemType(); - const sent_val = tv.ty.sentinel().?; + const elem_ty = tv.ty.childType(mod); + const sent_val = tv.ty.sentinel(mod).?; const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); const llvm_elems: [1]*llvm.Value = .{sentinel}; const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); @@ -3550,8 +3531,7 @@ pub const DeclGen = struct { }, .Optional => { comptime assert(optional_layout_version == 3); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = tv.ty.optionalChild(&buf); + const payload_ty = tv.ty.optionalChild(mod); const llvm_i8 = dg.context.intType(8); const is_pl = !tv.val.isNull(mod); @@ -3897,10 +3877,10 @@ pub const DeclGen = struct { .bytes => { // Note, sentinel is not stored even if the type has a sentinel. const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -3923,9 +3903,9 @@ pub const DeclGen = struct { // Note, sentinel is not stored even if the type has a sentinel. // The value includes the sentinel in those cases. const elem_vals = tv.val.castTag(.aggregate).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -3939,8 +3919,8 @@ pub const DeclGen = struct { .repeated => { // Note, sentinel is not stored even if the type has a sentinel. 
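The array and `.repeated` lowering above keeps recomputing the same quantity; with the Module threaded through, the sentinel-aware element count looks like this (a sketch assuming `tv.ty` is an array type):

```zig
const sentinel = tv.ty.sentinel(mod); // ?Value: null when there is no sentinel
const len = @intCast(usize, tv.ty.arrayLen(mod));
const len_including_sent = len + @boolToInt(sentinel != null);
```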
const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const len = @intCast(usize, tv.ty.arrayLen()); + const elem_ty = tv.ty.childType(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); const llvm_elems = try dg.gpa.alloc(*llvm.Value, len); defer dg.gpa.free(llvm_elems); for (llvm_elems) |*elem| { @@ -3955,10 +3935,10 @@ pub const DeclGen = struct { // Note, sentinel is not stored const str_lit = tv.val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == bytes.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -4006,13 +3986,10 @@ pub const DeclGen = struct { ptr_val: Value, decl_index: Module.Decl.Index, ) Error!*llvm.Value { - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = decl.ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); + const ptr_ty = try mod.singleMutPtrType(decl.ty); return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); } @@ -4135,9 +4112,8 @@ pub const DeclGen = struct { .opt_payload_ptr => { const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); + const payload_ty = opt_payload_ptr.container_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or payload_ty.optionalReprIsPayload(mod)) { @@ -4251,7 +4227,8 @@ pub const DeclGen = struct { } fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*llvm.Value { - const alignment = ptr_ty.ptrInfo().data.@"align"; + const mod = dg.module; + const alignment = ptr_ty.ptrInfo(mod).@"align"; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. 
// The value cannot be undefined, because we use the `nonnull` annotation @@ -4374,7 +4351,7 @@ pub const DeclGen = struct { ) void { const mod = dg.module; if (param_ty.isPtrAtRuntime(mod)) { - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias"); @@ -4786,7 +4763,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(), + .Pointer => callee_ty.childType(mod), else => unreachable, }; const fn_info = zig_fn_ty.fnInfo(); @@ -5014,7 +4991,7 @@ pub const FuncGen = struct { .slice => { assert(!it.byval_attr); const param_ty = fn_info.param_types[it.zig_index - 1]; - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); const llvm_arg_i = it.llvm_index - 2; if (math.cast(u5, it.zig_index - 1)) |i| { @@ -5098,11 +5075,7 @@ pub const FuncGen = struct { const ret_ty = self.typeOf(un_op); if (self.ret_ptr) |ret_ptr| { const operand = try self.resolveInst(un_op); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(ret_ptr, ptr_ty, operand, .NotAtomic); _ = self.builder.buildRetVoid(); return null; @@ -5150,11 +5123,11 @@ pub const FuncGen = struct { } fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.typeOf(un_op); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); const fn_info = self.dg.decl.ty.fnInfo(); - const mod = self.dg.module; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (fn_info.return_type.isError(mod)) { // Functions with an empty error set are emitted with an error code @@ -5301,15 +5274,13 @@ pub const FuncGen = struct { operand_ty: Type, op: math.CompareOperator, ) Allocator.Error!*llvm.Value { - var opt_buffer: Type.Payload.ElemType = undefined; - const mod = self.dg.module; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { .Enum => scalar_ty.intTagType(), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { - const payload_ty = operand_ty.optionalChild(&opt_buffer); + const payload_ty = operand_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.optionalReprIsPayload(mod)) { @@ -5506,11 +5477,12 @@ pub const FuncGen = struct { } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(mod); const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused); } @@ -5661,9 +5633,9 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); - const array_ty = 
operand_ty.childType(); + const array_ty = operand_ty.childType(mod); const llvm_usize = try self.dg.lowerType(Type.usize); - const len = llvm_usize.constInt(array_ty.arrayLen(), .False); + const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False); const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -5806,20 +5778,20 @@ pub const FuncGen = struct { const mod = fg.dg.module; const target = mod.getTarget(); const llvm_usize_ty = fg.context.intType(target.ptrBitWidth()); - switch (ty.ptrSize()) { + switch (ty.ptrSize(mod)) { .Slice => { const len = fg.builder.buildExtractValue(ptr, 1, ""); - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const abi_size = elem_ty.abiSize(mod); if (abi_size == 1) return len; const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False); return fg.builder.buildMul(len, abi_size_llvm_val, ""); }, .One => { - const array_ty = ty.childType(); - const elem_ty = array_ty.childType(); + const array_ty = ty.childType(mod); + const elem_ty = array_ty.childType(mod); const abi_size = elem_ty.abiSize(mod); - return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False); + return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False); }, .Many, .C => unreachable, } @@ -5832,10 +5804,11 @@ pub const FuncGen = struct { } fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); const slice_ptr_ty = self.typeOf(ty_op.operand); - const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType()); + const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType(mod)); return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); } @@ -5847,7 +5820,7 @@ pub const FuncGen = struct { const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const elem_ty = slice_ty.childType(); + const elem_ty = slice_ty.childType(mod); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; @@ -5863,13 +5836,14 @@ pub const FuncGen = struct { } fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType()); + const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType(mod)); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); @@ -5884,7 +5858,7 @@ pub const FuncGen = struct { const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try self.dg.lowerType(array_ty); - const elem_ty = array_ty.childType(); + const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs 
}; if (isByRef(elem_ty, mod)) { @@ -5923,7 +5897,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -5951,14 +5925,14 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const elem_ptr = self.air.getRefType(ty_pl.ty); - if (elem_ptr.ptrInfo().data.vector_index != .none) return base_ptr; + if (elem_ptr.ptrInfo(mod).vector_index != .none) return base_ptr; const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); if (ptr_ty.isSinglePointer(mod)) { @@ -6098,7 +6072,7 @@ pub const FuncGen = struct { const field_ptr = try self.resolveInst(extra.field_ptr); const target = self.dg.module.getTarget(); - const parent_ty = self.air.getRefType(ty_pl.ty).childType(); + const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); @@ -6232,6 +6206,7 @@ pub const FuncGen = struct { } fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const dib = self.dg.object.di_builder orelse return null; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); @@ -6243,7 +6218,7 @@ pub const FuncGen = struct { name.ptr, self.di_file.?, self.prev_dbg_line, - try self.dg.object.lowerDebugType(ptr_ty.childType(), .full), + try self.dg.object.lowerDebugType(ptr_ty.childType(mod), .full), true, // always preserve 0, // flags ); @@ -6365,7 +6340,7 @@ pub const FuncGen = struct { const output_inst = try self.resolveInst(output); const output_ty = self.typeOf(output); assert(output_ty.zigTypeTag(mod) == .Pointer); - const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType()); + const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType(mod)); if (llvm_ret_indirect[i]) { // Pass the result by reference as an indirect output (e.g. "=*m") @@ -6466,7 +6441,7 @@ pub const FuncGen = struct { // an elementtype() attribute. 
if (constraint[0] == '*') { llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse - try self.dg.lowerPtrElemTy(arg_ty.childType()); + try self.dg.lowerPtrElemTy(arg_ty.childType(mod)); } else { llvm_param_attrs[llvm_param_i] = null; } @@ -6657,14 +6632,13 @@ pub const FuncGen = struct { operand_is_ptr: bool, pred: llvm.IntPredicate, ) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); - const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const optional_llvm_ty = try self.dg.lowerType(optional_ty); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); - const mod = self.dg.module; + const payload_ty = optional_ty.optionalChild(mod); if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) self.builder.buildLoad(optional_llvm_ty, operand, "") @@ -6709,7 +6683,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(); const err_set_ty = try self.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); @@ -6748,9 +6722,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const optional_ty = self.typeOf(ty_op.operand).childType(mod); + const payload_ty = optional_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. @@ -6770,9 +6743,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const optional_ty = self.typeOf(ty_op.operand).childType(mod); + const payload_ty = optional_ty.optionalChild(mod); const non_null_bit = self.context.intType(8).constInt(1, .False); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. 
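Several hunks in this file replace the `single_mut_pointer` stack-payload idiom with `Module.singleMutPtrType`. The before/after, condensed into one sketch (`payload_ty` stands for whatever element type the call site has):

```zig
// Before: a stack-allocated payload that must not escape the frame.
var ptr_ty_payload: Type.Payload.ElemType = .{
    .base = .{ .tag = .single_mut_pointer },
    .data = payload_ty,
};
const old_ptr_ty = Type.initPayload(&ptr_ty_payload.base);

// After: the *T type is interned and safe to store anywhere.
const ptr_ty = try mod.singleMutPtrType(payload_ty);
```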
@@ -6827,9 +6799,9 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const result_ty = self.typeOfIndex(inst); - const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty; + const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return if (operand_is_ptr) operand else null; @@ -6862,7 +6834,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { @@ -6895,7 +6867,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const err_union_ty = self.typeOf(ty_op.operand).childType(); + const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); @@ -6961,11 +6933,7 @@ pub const FuncGen = struct { if (isByRef(optional_ty, mod)) { const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, ""); _ = self.builder.buildStore(non_null_bit, non_null_ptr); @@ -6995,11 +6963,7 @@ pub const FuncGen = struct { const store_inst = self.builder.buildStore(ok_err_code, err_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); return result_ptr; } @@ -7027,11 +6991,7 @@ pub const FuncGen = struct { const store_inst = self.builder.buildStore(operand, err_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = 
payload_ptr; _ = payload_ptr_ty; @@ -7076,7 +7036,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(extra.rhs); const loaded_vector = blk: { - const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType()); + const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod)); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); @@ -7287,7 +7247,7 @@ pub const FuncGen = struct { const inst_llvm_ty = try self.dg.lowerType(inst_ty); const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(); + const vec_len = inst_ty.vectorLen(mod); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -7361,7 +7321,7 @@ pub const FuncGen = struct { if (scalar_ty.isSignedInt(mod)) { const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(); + const vec_len = inst_ty.vectorLen(mod); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -7384,13 +7344,14 @@ pub const FuncGen = struct { } fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType()); - switch (ptr_ty.ptrSize()) { + const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod)); + switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset }; @@ -7409,14 +7370,15 @@ pub const FuncGen = struct { } fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const negative_offset = self.builder.buildNeg(offset, ""); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType()); - switch (ptr_ty.ptrSize()) { + const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod)); + switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. 
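In `airPtrAdd` and `airPtrSub`, `ptrSize(mod)` drives the GEP shape: a `.One` pointer is a `*[N]T`, so LLVM needs a leading zero index into the array. A sketch of the dispatch; only the `.One` arm is shown in the hunks above, so the remaining arms are a guess at the elided remainder:

```zig
switch (ptr_ty.ptrSize(mod)) {
    .One => {
        // It's a pointer to an array: add the extra leading GEP index.
        const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset };
        return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
    },
    .C, .Many => {
        // Plain element pointer: index the element type directly.
        const indices: [1]*llvm.Value = .{offset};
        return self.builder.buildInBoundsGEP(llvm_elem_ty, ptr, &indices, indices.len, "");
    },
    .Slice => unreachable,
}
```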
const indices: [2]*llvm.Value = .{ @@ -7587,7 +7549,7 @@ pub const FuncGen = struct { }; if (ty.zigTypeTag(mod) == .Vector) { - const vec_len = ty.vectorLen(); + const vec_len = ty.vectorLen(mod); const vector_result_ty = llvm_i32.vectorType(vec_len); var result = vector_result_ty.getUndef(); @@ -7672,8 +7634,8 @@ pub const FuncGen = struct { const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); const sign_mask = one.constShl(shift_amt); const result = if (ty.zigTypeTag(mod) == .Vector) blk: { - const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, ""); - const cast_ty = int_llvm_ty.vectorType(ty.vectorLen()); + const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, ""); + const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod)); const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, ""); break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, ""); } else blk: { @@ -7720,7 +7682,7 @@ pub const FuncGen = struct { const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty); if (ty.zigTypeTag(mod) == .Vector) { const result = llvm_ty.getUndef(); - return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen()); + return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod)); } break :b libc_fn; @@ -7887,7 +7849,7 @@ pub const FuncGen = struct { const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False); const lhs_max = lhs_scalar_llvm_ty.constAllOnes(); if (rhs_ty.zigTypeTag(mod) == .Vector) { - const vec_len = rhs_ty.vectorLen(); + const vec_len = rhs_ty.vectorLen(mod); const bits_vec = self.builder.buildVectorSplat(vec_len, bits, ""); const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, ""); const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, ""); @@ -8059,7 +8021,7 @@ pub const FuncGen = struct { } if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) { - const elem_ty = operand_ty.childType(); + const elem_ty = operand_ty.childType(mod); if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } @@ -8074,7 +8036,7 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); - const vector_len = operand_ty.arrayLen(); + const vector_len = operand_ty.arrayLen(mod); var i: u64 = 0; while (i < vector_len) : (i += 1) { const index_usize = llvm_usize.constInt(i, .False); @@ -8087,7 +8049,7 @@ pub const FuncGen = struct { } return array_ptr; } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) { - const elem_ty = operand_ty.childType(); + const elem_ty = operand_ty.childType(mod); const llvm_vector_ty = try self.dg.lowerType(inst_ty); if (!operand_is_ref) { return self.dg.todo("implement bitcast non-ref array to vector", .{}); } @@ -8108,7 +8070,7 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); - const vector_len = operand_ty.arrayLen(); + const vector_len = operand_ty.arrayLen(mod); var vector = llvm_vector_ty.getUndef(); var i: u64 = 0; while (i < vector_len) : (i += 1) { @@ -8207,7 +8169,7 @@ pub const FuncGen = struct { fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ptr_ty = self.typeOfIndex(inst); - const pointee_type = ptr_ty.childType(); + const
pointee_type = ptr_ty.childType(mod); if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const pointee_llvm_ty = try self.dg.lowerType(pointee_type); @@ -8218,7 +8180,7 @@ pub const FuncGen = struct { fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ptr_ty = self.typeOfIndex(inst); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; const ret_llvm_ty = try self.dg.lowerType(ret_ty); @@ -8232,11 +8194,11 @@ pub const FuncGen = struct { } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); - const operand_ty = ptr_ty.childType(); - const mod = self.dg.module; + const operand_ty = ptr_ty.childType(mod); const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { @@ -8271,8 +8233,10 @@ pub const FuncGen = struct { /// /// The first instruction of `body_tail` is the one whose copy we want to elide. fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool { + const mod = fg.dg.module; + const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { - switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0])) { + switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip.*)) { .none => continue, .write, .noret, .complex => return false, .tomb => return true, @@ -8288,7 +8252,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[inst].ty_op; const ptr_ty = fg.typeOf(ty_op.operand); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const ptr = try fg.resolveInst(ty_op.operand); elide: { @@ -8363,7 +8327,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(extra.ptr); var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); - const operand_ty = self.typeOf(extra.ptr).elemType(); + const operand_ty = self.typeOf(extra.ptr).childType(mod); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating @@ -8409,7 +8373,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try self.resolveInst(pl_op.operand); const ptr_ty = self.typeOf(pl_op.operand); - const operand_ty = ptr_ty.elemType(); + const operand_ty = ptr_ty.childType(mod); const operand = try self.resolveInst(extra.operand); const is_signed_int = operand_ty.isSignedInt(mod); const is_float = operand_ty.isRuntimeFloat(); @@ -8464,7 +8428,7 @@ pub const FuncGen = struct { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.typeOf(atomic_load.ptr); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const elem_ty = ptr_info.pointee_type; if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -8497,7 +8461,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - 
const operand_ty = ptr_ty.childType(); + const operand_ty = ptr_ty.childType(mod); if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); @@ -8595,9 +8559,9 @@ pub const FuncGen = struct { const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd"); const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); - const len = switch (ptr_ty.ptrSize()) { + const len = switch (ptr_ty.ptrSize(mod)) { .Slice => self.builder.buildExtractValue(dest_slice, 1, ""), - .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False), + .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False), .Many, .C => unreachable, }; const elem_llvm_ty = try self.dg.lowerType(elem_ty); @@ -8665,7 +8629,7 @@ pub const FuncGen = struct { fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const un_ty = self.typeOf(bin_op.lhs).childType(); + const un_ty = self.typeOf(bin_op.lhs).childType(mod); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_ptr = try self.resolveInst(bin_op.lhs); @@ -8791,7 +8755,7 @@ pub const FuncGen = struct { // The truncated result at the end will be the correct bswap const scalar_llvm_ty = self.context.intType(bits + 8); if (operand_ty.zigTypeTag(mod) == .Vector) { - const vec_len = operand_ty.vectorLen(); + const vec_len = operand_ty.vectorLen(mod); operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -8980,7 +8944,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const llvm_ret_ty = try self.dg.lowerType(slice_ty); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); @@ -9097,10 +9061,11 @@ pub const FuncGen = struct { } fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const scalar = try self.resolveInst(ty_op.operand); const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const len = vector_ty.vectorLen(mod); return self.builder.buildVectorSplat(len, scalar, ""); } @@ -9122,7 +9087,7 @@ pub const FuncGen = struct { const b = try self.resolveInst(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(mod); // LLVM uses integers larger than the length of the first array to // index into the second array. 
This was deemed unnecessarily fragile @@ -9298,14 +9263,14 @@ pub const FuncGen = struct { .ty = scalar_ty, .val = Value.initPayload(&init_value_payload.base), }); - return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(), init_value); + return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value); } fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const len = @intCast(usize, result_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = try self.dg.lowerType(result_ty); @@ -9400,7 +9365,7 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.lowerType(Type.usize); const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); - const array_info = result_ty.arrayInfo(); + const array_info = result_ty.arrayInfo(mod); var elem_ptr_payload: Type.Payload.Pointer = .{ .data = .{ .pointee_type = array_info.elem_type, @@ -9720,7 +9685,7 @@ pub const FuncGen = struct { } const mod = self.dg.module; - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space @@ -9763,9 +9728,8 @@ pub const FuncGen = struct { opt_ty: Type, can_elide_load: bool, ) !*llvm.Value { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); const mod = fg.dg.module; + const payload_ty = opt_ty.optionalChild(mod); if (isByRef(opt_ty, mod)) { // We have a pointer and we need to return a pointer to the first field. @@ -9827,13 +9791,13 @@ pub const FuncGen = struct { struct_ptr_ty: Type, field_index: u32, ) !?*llvm.Value { - const struct_ty = struct_ptr_ty.childType(); const mod = self.dg.module; + const struct_ty = struct_ptr_ty.childType(mod); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout()) { .Packed => { const result_ty = self.typeOfIndex(inst); - const result_ty_info = result_ty.ptrInfo().data; + const result_ty_info = result_ty.ptrInfo(mod); if (result_ty_info.host_size != 0) { // From LLVM's perspective, a pointer to a packed struct and a pointer @@ -9919,7 +9883,7 @@ pub const FuncGen = struct { /// For isByRef=false types, it creates a load instruction and returns it. 
fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value { const mod = self.dg.module; - const info = ptr_ty.ptrInfo().data; + const info = ptr_ty.ptrInfo(mod); if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_alignment = info.alignment(mod); @@ -9954,7 +9918,7 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); + const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); @@ -9992,9 +9956,9 @@ pub const FuncGen = struct { elem: *llvm.Value, ordering: llvm.AtomicOrdering, ) !void { - const info = ptr_ty.ptrInfo().data; - const elem_ty = info.pointee_type; const mod = self.dg.module; + const info = ptr_ty.ptrInfo(mod); + const elem_ty = info.pointee_type; if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { return; } @@ -10026,7 +9990,7 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); + const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); const containing_int_ty = containing_int.typeOf(); const shift_amt = containing_int_ty.constInt(info.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit @@ -10864,8 +10828,7 @@ const ParamTypeIterator = struct { .Unspecified, .Inline => { it.zig_index += 1; it.llvm_index += 1; - var buf: Type.Payload.ElemType = undefined; - if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice(mod))) { + if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(mod).isSlice(mod))) { it.llvm_index += 1; return .slice; } else if (isByRef(ty, mod)) { @@ -11185,8 +11148,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { return true; }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index f69c6cb31746..9de2c03142c8 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -625,20 +625,20 @@ pub const DeclGen = struct { .Array => switch (val.tag()) { .aggregate => { const elem_vals = val.castTag(.aggregate).?.data; - const elem_ty = ty.elemType(); - const len = @intCast(u32, ty.arrayLenIncludingSentinel()); // TODO: limit spir-v to 32 bit arrays in a more elegant way. + const elem_ty = ty.childType(mod); + const len = @intCast(u32, ty.arrayLenIncludingSentinel(mod)); // TODO: limit spir-v to 32 bit arrays in a more elegant way. 
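// A note on the two length queries in these cases, both of which now
// thread `mod` through to the InternPool: this .aggregate case uses
// arrayLenIncludingSentinel(mod) because the aggregate payload stores
// the sentinel as its final element value, while the .repeated case
// below uses arrayLen(mod) and then lowers ty.sentinel(mod) separately.
// As a sketch, for a hypothetical [2:0]u8 value the aggregate path
// lowers three values, and the repeated path lowers two and then the
// sentinel.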
for (elem_vals[0..len]) |elem_val| { try self.lower(elem_ty, elem_val); } }, .repeated => { const elem_val = val.castTag(.repeated).?.data; - const elem_ty = ty.elemType(); - const len = @intCast(u32, ty.arrayLen()); + const elem_ty = ty.childType(mod); + const len = @intCast(u32, ty.arrayLen(mod)); for (0..len) |_| { try self.lower(elem_ty, elem_val); } - if (ty.sentinel()) |sentinel| { + if (ty.sentinel(mod)) |sentinel| { try self.lower(elem_ty, sentinel); } }, @@ -646,7 +646,7 @@ pub const DeclGen = struct { const str_lit = val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; try self.addBytes(bytes); - if (ty.sentinel()) |sentinel| { + if (ty.sentinel(mod)) |sentinel| { try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); } }, @@ -706,8 +706,7 @@ pub const DeclGen = struct { } }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); + const payload_ty = ty.optionalChild(mod); const has_payload = !val.isNull(mod); const abi_size = ty.abiSize(mod); @@ -1216,10 +1215,10 @@ pub const DeclGen = struct { return try self.spv.resolve(.{ .float_type = .{ .bits = bits } }); }, .Array => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_ty_ref = try self.resolveType(elem_ty, .direct); - const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel()) orelse { - return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel()}); + const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse { + return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)}); }; return self.spv.arrayType(total_len, elem_ty_ref); }, @@ -1248,7 +1247,7 @@ pub const DeclGen = struct { }, }, .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); const storage_class = spvStorageClass(ptr_info.@"addrspace"); const child_ty_ref = try self.resolveType(ptr_info.pointee_type, .indirect); @@ -1280,8 +1279,8 @@ pub const DeclGen = struct { // TODO: Properly verify sizes and child type. return try self.spv.resolve(.{ .vector_type = .{ - .component_type = try self.resolveType(ty.elemType(), repr), - .component_count = @intCast(u32, ty.vectorLen()), + .component_type = try self.resolveType(ty.childType(mod), repr), + .component_count = @intCast(u32, ty.vectorLen(mod)), } }); }, .Struct => { @@ -1335,8 +1334,7 @@ pub const DeclGen = struct { } }); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // Just use a bool. 
// Note: Always generate the bool with indirect format, to save on some sanity @@ -1685,7 +1683,8 @@ pub const DeclGen = struct { } fn load(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef) !IdRef { - const value_ty = ptr_ty.childType(); + const mod = self.module; + const value_ty = ptr_ty.childType(mod); const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ @@ -1701,7 +1700,8 @@ pub const DeclGen = struct { } fn store(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, value_id: IdRef) !void { - const value_ty = ptr_ty.childType(); + const mod = self.module; + const value_ty = ptr_ty.childType(mod); const indirect_value_id = try self.convertToIndirect(value_ty, value_id); const access = spec.MemoryAccess.Extended{ .Volatile = ptr_ty.isVolatilePtr(), @@ -2072,7 +2072,7 @@ pub const DeclGen = struct { const b = try self.resolve(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(mod); const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(ty); @@ -2138,9 +2138,10 @@ pub const DeclGen = struct { } fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { + const mod = self.module; const result_ty_ref = try self.resolveType(result_ty, .direct); - switch (ptr_ty.ptrSize()) { + switch (ptr_ty.ptrSize(mod)) { .One => { // Pointer to array // TODO: Is this correct? @@ -2498,7 +2499,7 @@ pub const DeclGen = struct { // Construct new pointer type for the resulting pointer const elem_ty = ptr_ty.elemType2(mod); // use elemType2() so that we get T for *[N]T. const elem_ty_ref = try self.resolveType(elem_ty, .direct); - const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); + const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace(mod))); if (ptr_ty.isSinglePointer(mod)) { // Pointer-to-array. In this case, the resulting pointer is not of the same type // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain. @@ -2516,7 +2517,7 @@ pub const DeclGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); // TODO: Make this return a null ptr or something if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -2526,6 +2527,7 @@ pub const DeclGen = struct { } fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const ptr_id = try self.resolve(bin_op.lhs); @@ -2536,9 +2538,9 @@ pub const DeclGen = struct { // If we have a pointer-to-array, construct an element pointer to use with load() // If we pass ptr_ty directly, it will attempt to load the entire array rather than // just an element.
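// The replacement below relies on ptrInfo(mod) now returning
// Payload.Pointer.Data by value instead of a pointer into a heap
// payload. A minimal sketch of that pattern, using the same calls as
// the diff:
//
//   var info = ptr_ty.ptrInfo(mod); // by-value copy of the pointer info
//   info.size = .One;               // view a single element, not *[N]T
//   const one_elem_ptr_ty = try Type.ptr(undefined, mod, info);
//
// Mutating the copy cannot corrupt the original type, and Type.ptr
// interns a fresh pointer type from the adjusted description.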
- var elem_ptr_info = ptr_ty.ptrInfo(); - elem_ptr_info.data.size = .One; - const elem_ptr_ty = Type.initPayload(&elem_ptr_info.base); + var elem_ptr_info = ptr_ty.ptrInfo(mod); + elem_ptr_info.size = .One; + const elem_ptr_ty = try Type.ptr(undefined, mod, elem_ptr_info); return try self.load(elem_ptr_ty, elem_ptr_id); } @@ -2586,7 +2588,7 @@ pub const DeclGen = struct { field_index: u32, ) !?IdRef { const mod = self.module; - const object_ty = object_ptr_ty.childType(); + const object_ty = object_ptr_ty.childType(mod); switch (object_ty.zigTypeTag(mod)) { .Struct => switch (object_ty.containerLayout()) { .Packed => unreachable, // TODO @@ -2662,9 +2664,10 @@ pub const DeclGen = struct { fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ptr_ty = self.typeOfIndex(inst); - assert(ptr_ty.ptrAddressSpace() == .generic); - const child_ty = ptr_ty.childType(); + assert(ptr_ty.ptrAddressSpace(mod) == .generic); + const child_ty = ptr_ty.childType(mod); const child_ty_ref = try self.resolveType(child_ty, .indirect); return try self.alloc(child_ty_ref, null); } @@ -2834,7 +2837,7 @@ pub const DeclGen = struct { const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.typeOf(un_op); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { try self.func.body.emit(self.spv.gpa, .OpReturn, {}); @@ -2971,8 +2974,7 @@ pub const DeclGen = struct { const operand_id = try self.resolve(un_op); const optional_ty = self.typeOf(un_op); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const payload_ty = optional_ty.optionalChild(mod); const bool_ty_ref = try self.resolveType(Type.bool, .direct); diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index 1d4840aeb70f..c5ba429ec9a8 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -11,7 +11,8 @@ const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; -const ZigDecl = @import("../../Module.zig").Decl; +const ZigModule = @import("../../Module.zig"); +const ZigDecl = ZigModule.Decl; const spec = @import("spec.zig"); const Word = spec.Word; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 682431203e0d..178f9fa64c48 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -219,8 +219,7 @@ pub const DeclState = struct { try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); } else { // Non-pointer optionals are structs: struct { .maybe = *, .val = * } - var buf = try arena.create(Type.Payload.ElemType); - const payload_ty = ty.optionalChild(buf); + const payload_ty = ty.optionalChild(mod); // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata @@ -304,7 +303,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); } }, .Array => { @@ -315,7 +314,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index)); + try 
self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); // DW.AT.subrange_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim)); // DW.AT.type, DW.FORM.ref4 @@ -323,7 +322,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index)); // DW.AT.count, DW.FORM.udata - const len = ty.arrayLenIncludingSentinel(); + const len = ty.arrayLenIncludingSentinel(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), len); // DW.AT.array_type delimit children try dbg_info_buffer.append(0); @@ -688,7 +687,7 @@ pub const DeclState = struct { const mod = self.mod; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - const child_ty = if (is_ptr) ty.childType() else ty; + const child_ty = if (is_ptr) ty.childType(mod) else ty; switch (loc) { .register => |reg| { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 0154207368e0..fb7ca3a87fc3 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2931,7 +2931,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const mod = wasm.base.options.module.?; atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; @@ -2988,7 +2988,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { for (mod.error_name_list.items) |error_name| { const len = @intCast(u32, error_name.len + 1); // names are 0-terminated - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const offset = @intCast(u32, atom.code.items.len); // first we create the data for the slice of the name try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated diff --git a/src/print_air.zig b/src/print_air.zig index f4a1aeae3229..8717bdc6bfe5 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -433,9 +433,10 @@ const Writer = struct { } fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const vector_ty = w.air.getRefType(ty_pl.ty); - const len = @intCast(usize, vector_ty.arrayLen()); + const len = @intCast(usize, vector_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]); try w.writeType(s, vector_ty); @@ -512,10 +513,11 @@ const Writer = struct { } fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Bin, pl_op.payload).data; - const elem_ty = w.typeOfIndex(inst).childType(); + const elem_ty = w.typeOfIndex(inst).childType(mod); try w.writeType(s, elem_ty); try s.writeAll(", "); try w.writeOperand(s, inst, 0, pl_op.operand); diff --git a/src/type.zig b/src/type.zig index 868ae4231be8..1f970919c922 100644 --- a/src/type.zig +++ b/src/type.zig @@ -40,7 +40,7 @@ pub const Type = struct { .ptr_type => return .Pointer, .array_type => return .Array, .vector_type => return .Vector, - .optional_type => return .Optional, + .opt_type => return .Optional, .error_union_type => return .ErrorUnion, .struct_type => return .Struct, .union_type => return .Union, @@ -118,38 +118,17 @@ pub const Type = struct { .function => return .Fn, .array, -
.array_u8_sentinel_0, - .array_u8, .array_sentinel, => return .Array, - .vector => return .Vector, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => return .Pointer, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => return .Optional, + .optional => return .Optional, - .anyerror_void_error_union, .error_union => return .ErrorUnion, + .error_union => return .ErrorUnion, .anyframe_T => return .AnyFrame, @@ -177,8 +156,7 @@ pub const Type = struct { return switch (self.zigTypeTag(mod)) { .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod), .Optional => { - var buf: Payload.ElemType = undefined; - return self.optionalChild(&buf).baseZigTypeTag(mod); + return self.optionalChild(mod).baseZigTypeTag(mod); }, else => |t| t, }; @@ -218,8 +196,7 @@ pub const Type = struct { .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()), .Optional => { if (!is_equality_cmp) return false; - var buf: Payload.ElemType = undefined; - return ty.optionalChild(&buf).isSelfComparable(mod, is_equality_cmp); + return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); }, }; } @@ -275,9 +252,8 @@ pub const Type = struct { } pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - if (self.ip_index != .none) { - return null; - } + assert(self.ip_index == .none); + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; @@ -287,281 +263,61 @@ pub const Type = struct { return null; } - pub fn castPointer(self: Type) ?*Payload.ElemType { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => self.cast(Payload.ElemType), - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - else => null, - }; - } - /// If it is a function pointer, returns the function type. Otherwise returns null. 
pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { if (ty.zigTypeTag(mod) != .Pointer) return null; - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); if (elem_ty.zigTypeTag(mod) != .Fn) return null; return elem_ty; } - pub fn ptrIsMutable(ty: Type) bool { - return switch (ty.tag()) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .many_const_pointer, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .c_const_pointer, - .const_slice, - => false, - - .single_mut_pointer, - .many_mut_pointer, - .manyptr_u8, - .c_mut_pointer, - .mut_slice, - => true, - - .pointer => ty.castTag(.pointer).?.data.mutable, - - else => unreachable, + pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.mutable, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| !ptr_type.is_const, + else => unreachable, + }, }; } - pub const ArrayInfo = struct { elem_type: Type, sentinel: ?Value = null, len: u64 }; - pub fn arrayInfo(self: Type) ArrayInfo { + pub const ArrayInfo = struct { + elem_type: Type, + sentinel: ?Value = null, + len: u64, + }; + + pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { return .{ - .len = self.arrayLen(), - .sentinel = self.sentinel(), - .elem_type = self.elemType(), + .len = self.arrayLen(mod), + .sentinel = self.sentinel(mod), + .elem_type = self.childType(mod), }; } - pub fn ptrInfo(self: Type) Payload.Pointer { - switch (self.ip_index) { - .none => switch (self.tag()) { - .single_const_pointer_to_comptime_int => return .{ .data = .{ - .pointee_type = Type.comptime_int, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .const_slice_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .const_slice_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .many_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - 
.@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .many_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .c_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = false, - .@"volatile" = false, - .size = .C, - } }, - .c_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = true, - .@"volatile" = false, - .size = .C, - } }, - .const_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .mut_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Slice, - } }, - - .pointer => return self.castTag(.pointer).?.*, - - .optional_single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .optional_single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrInfo(); + pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data, + .optional => b: { + const child_type = ty.optionalChild(mod); + break :b child_type.ptrInfo(mod); }, else => unreachable, }, - else => @panic("TODO"), - } + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |p| Payload.Pointer.Data.fromKey(p), + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| Payload.Pointer.Data.fromKey(p), + else => unreachable, + }, + else => unreachable, + }, + }; } pub fn eql(a: Type, b: Type, mod: *Module) bool { @@ -658,20 +414,17 @@ pub const Type = struct { }, .array, - 
.array_u8_sentinel_0, - .array_u8, .array_sentinel, - .vector, => { if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false; - if (a.arrayLen() != b.arrayLen()) + if (a.arrayLen(mod) != b.arrayLen(mod)) return false; - const elem_ty = a.elemType(); - if (!elem_ty.eql(b.elemType(), mod)) + const elem_ty = a.childType(mod); + if (!elem_ty.eql(b.childType(mod), mod)) return false; - const sentinel_a = a.sentinel(); - const sentinel_b = b.sentinel(); + const sentinel_a = a.sentinel(mod); + const sentinel_b = b.sentinel(mod); if (sentinel_a) |sa| { if (sentinel_b) |sb| { return sa.eql(sb, elem_ty, mod); @@ -683,28 +436,14 @@ pub const Type = struct { } }, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => { if (b.zigTypeTag(mod) != .Pointer) return false; - const info_a = a.ptrInfo().data; - const info_b = b.ptrInfo().data; + const info_a = a.ptrInfo(mod); + const info_b = b.ptrInfo(mod); if (!info_a.pointee_type.eql(info_b.pointee_type, mod)) return false; if (info_a.@"align" != info_b.@"align") @@ -743,18 +482,13 @@ pub const Type = struct { return true; }, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { + .optional => { if (b.zigTypeTag(mod) != .Optional) return false; - var buf_a: Payload.ElemType = undefined; - var buf_b: Payload.ElemType = undefined; - return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), mod); + return a.optionalChild(mod).eql(b.optionalChild(mod), mod); }, - .anyerror_void_error_union, .error_union => { + .error_union => { if (b.zigTypeTag(mod) != .ErrorUnion) return false; const a_set = a.errorUnionSet(); @@ -947,47 +681,23 @@ pub const Type = struct { }, .array, - .array_u8_sentinel_0, - .array_u8, .array_sentinel, => { std.hash.autoHash(hasher, std.builtin.TypeId.Array); - const elem_ty = ty.elemType(); - std.hash.autoHash(hasher, ty.arrayLen()); + const elem_ty = ty.childType(mod); + std.hash.autoHash(hasher, ty.arrayLen(mod)); hashWithHasher(elem_ty, hasher, mod); - hashSentinel(ty.sentinel(), elem_ty, hasher, mod); + hashSentinel(ty.sentinel(mod), elem_ty, hasher, mod); }, - .vector => { - std.hash.autoHash(hasher, std.builtin.TypeId.Vector); - - const elem_ty = ty.elemType(); - std.hash.autoHash(hasher, ty.vectorLen()); - hashWithHasher(elem_ty, hasher, mod); - }, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => { std.hash.autoHash(hasher, std.builtin.TypeId.Pointer); - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); hashWithHasher(info.pointee_type, hasher, mod); hashSentinel(info.sentinel, info.pointee_type, hasher, mod); std.hash.autoHash(hasher, info.@"align"); @@ -1001,17 +711,13 @@ pub const Type = struct { std.hash.autoHash(hasher, info.size); }, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { + .optional => { std.hash.autoHash(hasher, std.builtin.TypeId.Optional); - var buf: Payload.ElemType = undefined; - 
hashWithHasher(ty.optionalChild(&buf), hasher, mod); + hashWithHasher(ty.optionalChild(mod), hasher, mod); }, - .anyerror_void_error_union, .error_union => { + .error_union => { std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion); const set_ty = ty.errorUnionSet(); @@ -1023,7 +729,7 @@ pub const Type = struct { .anyframe_T => { std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - hashWithHasher(ty.childType(), hasher, mod); + hashWithHasher(ty.childType(mod), hasher, mod); }, .empty_struct => { @@ -1129,33 +835,12 @@ pub const Type = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, .inferred_alloc_const, .inferred_alloc_mut, .empty_struct_literal, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => unreachable, - .array_u8, - .array_u8_sentinel_0, - => return self.copyPayloadShallow(allocator, Payload.Len), - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, .anyframe_T, => { const payload = self.cast(Payload.ElemType).?; @@ -1170,13 +855,6 @@ pub const Type = struct { }; }, - .vector => { - const payload = self.castTag(.vector).?.data; - return Tag.vector.create(allocator, .{ - .len = payload.len, - .elem_type = try payload.elem_type.copy(allocator), - }); - }, .array => { const payload = self.castTag(.array).?.data; return Tag.array.create(allocator, .{ @@ -1408,13 +1086,6 @@ pub const Type = struct { }); }, - .anyerror_void_error_union => return writer.writeAll("anyerror!void"), - .const_slice_u8 => return writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"), - .single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"), - .manyptr_u8 => return writer.writeAll("[*]u8"), - .manyptr_const_u8 => return writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0 => return writer.writeAll("[*:0]const u8"), .function => { const payload = ty.castTag(.function).?.data; try writer.writeAll("fn("); @@ -1447,20 +1118,6 @@ pub const Type = struct { ty = return_type; continue; }, - .array_u8 => { - const len = ty.castTag(.array_u8).?.data; - return writer.print("[{d}]u8", .{len}); - }, - .array_u8_sentinel_0 => { - const len = ty.castTag(.array_u8_sentinel_0).?.data; - return writer.print("[{d}:0]u8", .{len}); - }, - .vector => { - const payload = ty.castTag(.vector).?.data; - try writer.print("@Vector({d}, ", .{payload.len}); - try payload.elem_type.dump("", .{}, writer); - return writer.writeAll(")"); - }, .array => { const payload = ty.castTag(.array).?.data; try writer.print("[{d}]", .{payload.len}); @@ -1512,72 +1169,12 @@ pub const Type = struct { try writer.writeAll("}"); return; }, - .single_const_pointer => { - const pointee_type = ty.castTag(.single_const_pointer).?.data; - try writer.writeAll("*const "); - ty = pointee_type; - continue; - }, - .single_mut_pointer => { - const pointee_type = ty.castTag(.single_mut_pointer).?.data; - try writer.writeAll("*"); - ty = pointee_type; - continue; - }, - .many_const_pointer => { - const pointee_type = ty.castTag(.many_const_pointer).?.data; - try writer.writeAll("[*]const "); - ty = pointee_type; - continue; - }, - .many_mut_pointer => { - const pointee_type 
= ty.castTag(.many_mut_pointer).?.data; - try writer.writeAll("[*]"); - ty = pointee_type; - continue; - }, - .c_const_pointer => { - const pointee_type = ty.castTag(.c_const_pointer).?.data; - try writer.writeAll("[*c]const "); - ty = pointee_type; - continue; - }, - .c_mut_pointer => { - const pointee_type = ty.castTag(.c_mut_pointer).?.data; - try writer.writeAll("[*c]"); - ty = pointee_type; - continue; - }, - .const_slice => { - const pointee_type = ty.castTag(.const_slice).?.data; - try writer.writeAll("[]const "); - ty = pointee_type; - continue; - }, - .mut_slice => { - const pointee_type = ty.castTag(.mut_slice).?.data; - try writer.writeAll("[]"); - ty = pointee_type; - continue; - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); ty = child_type; continue; }, - .optional_single_const_pointer => { - const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; - try writer.writeAll("?*const "); - ty = pointee_type; - continue; - }, - .optional_single_mut_pointer => { - const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; - try writer.writeAll("?*"); - ty = pointee_type; - continue; - }, .pointer => { const payload = ty.castTag(.pointer).?.data; @@ -1680,7 +1277,7 @@ pub const Type = struct { .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => @panic("TODO"), @@ -1733,14 +1330,6 @@ pub const Type = struct { try decl.renderFullyQualifiedName(mod, writer); }, - .anyerror_void_error_union => try writer.writeAll("anyerror!void"), - .const_slice_u8 => try writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"), - .single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"), - .manyptr_u8 => try writer.writeAll("[*]u8"), - .manyptr_const_u8 => try writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0 => try writer.writeAll("[*:0]const u8"), - .error_set_inferred => { const func = ty.castTag(.error_set_inferred).?.data.func; @@ -1799,20 +1388,6 @@ pub const Type = struct { try print(error_union.payload, writer, mod); }, - .array_u8 => { - const len = ty.castTag(.array_u8).?.data; - try writer.print("[{d}]u8", .{len}); - }, - .array_u8_sentinel_0 => { - const len = ty.castTag(.array_u8_sentinel_0).?.data; - try writer.print("[{d}:0]u8", .{len}); - }, - .vector => { - const payload = ty.castTag(.vector).?.data; - try writer.print("@Vector({d}, ", .{payload.len}); - try print(payload.elem_type, writer, mod); - try writer.writeAll(")"); - }, .array => { const payload = ty.castTag(.array).?.data; try writer.print("[{d}]", .{payload.len}); @@ -1865,17 +1440,8 @@ pub const Type = struct { try writer.writeAll("}"); }, - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const info = ty.ptrInfo().data; + .pointer => { + const info = ty.ptrInfo(mod); if (info.sentinel) |s| switch (info.size) { .One, .C => unreachable, @@ -1920,16 +1486,6 @@ pub const Type = struct { try writer.writeByte('?'); try print(child_type, writer, mod); }, - .optional_single_mut_pointer => { - const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; - try writer.writeAll("?*"); - try print(pointee_type, writer, mod); - }, - 
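// Each of the specialized pointer-printing branches deleted here folds
// into the single .pointer case above: with the dedicated tags gone, a
// type such as ?*const T is an optional wrapping a generic pointer, so
// roughly:
//
//   .optional prints '?', then recurses into the child, and
//   .pointer reads ptrInfo(mod) for the constness, size, and sentinel
//   that each deleted branch used to hard-code.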
.optional_single_const_pointer => { - const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; - try writer.writeAll("?*const "); - try print(pointee_type, writer, mod); - }, .anyframe_T => { const return_type = ty.castTag(.anyframe_T).?.data; try writer.print("anyframe->", .{}); @@ -1963,12 +1519,6 @@ pub const Type = struct { pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { - .single_const_pointer_to_comptime_int => return Value{ .ip_index = .single_const_pointer_to_comptime_int_type, .legacy = undefined }, - .const_slice_u8 => return Value{ .ip_index = .const_slice_u8_type, .legacy = undefined }, - .const_slice_u8_sentinel_0 => return Value{ .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined }, - .manyptr_u8 => return Value{ .ip_index = .manyptr_u8_type, .legacy = undefined }, - .manyptr_const_u8 => return Value{ .ip_index = .manyptr_const_u8_type, .legacy = undefined }, - .manyptr_const_u8_sentinel_0 => return Value{ .ip_index = .manyptr_const_u8_sentinel_0_type, .legacy = undefined }, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, else => return Value.Tag.ty.create(allocator, self), @@ -1996,10 +1546,41 @@ pub const Type = struct { ) RuntimeBitsError!bool { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type.bits != 0, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .ptr_type => |ptr_type| { + // Pointers to zero-bit types still have a runtime address; however, pointers + // to comptime-only types do not, with the exception of function pointers. + if (ignore_comptime_only) return true; + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic; + if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); + return !comptimeOnly(ty, mod); + }, + .array_type => |array_type| { + if (array_type.sentinel != .none) { + return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } else { + return array_type.len > 0 and + try array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } + }, + .vector_type => |vector_type| { + return vector_type.len > 0 and + try vector_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + }, + .opt_type => |child| { + const child_ty = child.toType(); + if (child_ty.isNoReturn()) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) { + return true; + } else if (strat == .sema) { + return !(try strat.sema.typeRequiresComptime(child_ty)); + } else { + return !comptimeOnly(child_ty, mod); + } + }, .error_union_type => @panic("TODO"), .simple_type => |t| return switch (t) { .f16, @@ -2058,14 +1639,7 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; switch (ty.tag()) { - .const_slice_u8, - .const_slice_u8_sentinel_0, - .array_u8_sentinel_0, - .anyerror_void_error_union, .error_set_inferred, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .@"opaque", .error_set_single, @@ -2077,22 +1651,12 @@ pub const Type = struct { // Pointers to zero-bit types still have a runtime address; however, pointers // to comptime-only types do not, with the exception of function pointers. 
.anyframe_T, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, => { if (ignore_comptime_only) { return true; - } else if (ty.childType().zigTypeTag(mod) == .Fn) { - return !ty.childType().fnInfo().is_generic; + } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { + return !ty.childType(mod).fnInfo().is_generic; } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(ty)); } else { @@ -2101,7 +1665,6 @@ pub const Type = struct { }, // These are false because they are comptime-only types. - .single_const_pointer_to_comptime_int, .empty_struct, .empty_struct_literal, // These are function *bodies*, not pointers. @@ -2111,8 +1674,7 @@ pub const Type = struct { => return false, .optional => { - var buf: Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { // Then the optional is comptime-known to be null. return false; @@ -2200,10 +1762,9 @@ pub const Type = struct { } }, - .array, .vector => return ty.arrayLen() != 0 and - try ty.elemType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .array_u8 => return ty.arrayLen() != 0, - .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .array => return ty.arrayLen(mod) != 0 and + try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .tuple, .anon_struct => { const tuple = ty.tupleFields(); @@ -2224,14 +1785,14 @@ pub const Type = struct { /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => return true, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), - .error_union_type => @panic("TODO"), - .simple_type => |t| return switch (t) { + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => true, + .ptr_type => true, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), + .vector_type => true, + .opt_type => |child| child.toType().isPtrLikeOptional(mod), + .error_union_type => false, + .simple_type => |t| switch (t) { .f16, .f32, .f64, @@ -2287,23 +1848,8 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .array_u8, - .array_u8_sentinel_0, .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, .enum_numbered, - .vector, - .optional_single_mut_pointer, - .optional_single_const_pointer, => true, .error_set, @@ -2313,13 +1859,8 @@ pub const Type = struct { .@"opaque", // These are function bodies, not function pointers. 
.function, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, .enum_simple, .error_union, - .anyerror_void_error_union, .anyframe_T, .tuple, .anon_struct, @@ -2336,7 +1877,7 @@ pub const Type = struct { .array, .array_sentinel, - => ty.childType().hasWellDefinedLayout(mod), + => ty.childType(mod).hasWellDefinedLayout(mod), .optional => ty.isPtrLikeOptional(mod), .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, @@ -2417,76 +1958,36 @@ pub const Type = struct { } pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 { - switch (ty.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - const child_type = ty.cast(Payload.ElemType).?.data; - if (opt_sema) |sema| { - const res = try child_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } - return (child_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - }, - - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return 1, + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => { + const ptr_info = ty.castTag(.pointer).?.data; + if (ptr_info.@"align" != 0) { + return ptr_info.@"align"; + } else if (opt_sema) |sema| { + const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } else { + return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + } + }, + .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), - .pointer => { - const ptr_info = ty.castTag(.pointer).?.data; - if (ptr_info.@"align" != 0) { - return ptr_info.@"align"; - } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => @panic("TODO"), }, - .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), - - else => unreachable, } } - pub fn ptrAddressSpace(self: Type) std.builtin.AddressSpace { + pub fn ptrAddressSpace(self: Type, mod: *const Module) std.builtin.AddressSpace { return switch (self.tag()) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .inferred_alloc_const, - .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => .generic, - .pointer => self.castTag(.pointer).?.data.@"addrspace", .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrAddressSpace(); + const child_type = self.optionalChild(mod); + return child_type.ptrAddressSpace(mod); }, else => unreachable, @@ -2530,15 +2031,31 @@ pub const Type = struct { ) Module.CompileError!AbiAlignmentAdvanced { const target = mod.getTarget(); + const opt_sema = switch (strat) { + .sema => |sema| sema, + else => null, + }; + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { 
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; }, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .ptr_type => { + return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + }, + .array_type => |array_type| { + return array_type.child.toType().abiAlignmentAdvanced(mod, strat); + }, + .vector_type => |vector_type| { + const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema); + const bits = @intCast(u32, bits_u64); + const bytes = ((bits * vector_type.len) + 7) / 8; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return AbiAlignmentAdvanced{ .scalar = alignment }; + }, + + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .bool, @@ -2617,15 +2134,8 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; - const opt_sema = switch (strat) { - .sema => |sema| sema, - else => null, - }; switch (ty.tag()) { - .array_u8_sentinel_0, - .array_u8, - .@"opaque", - => return AbiAlignmentAdvanced{ .scalar = 1 }, + .@"opaque" => return AbiAlignmentAdvanced{ .scalar = 1 }, // represents machine code; not a pointer .function => { @@ -2634,47 +2144,21 @@ pub const Type = struct { return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; }, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, .pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, // TODO revisit this when we have the concept of the error tag type - .anyerror_void_error_union, .error_set_inferred, .error_set_single, .error_set, .error_set_merged, => return AbiAlignmentAdvanced{ .scalar = 2 }, - .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(mod, strat), - - .vector => { - const len = ty.arrayLen(); - const bits = try bitSizeAdvanced(ty.elemType(), mod, opt_sema); - const bytes = ((bits * len) + 7) / 8; - const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes); - return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; - }, + .array, .array_sentinel => return ty.childType(mod).abiAlignmentAdvanced(mod, strat), .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); + const child_type = ty.optionalChild(mod); switch (child_type.zigTypeTag(mod)) { .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, @@ -2933,8 +2417,29 @@ pub const Type = struct { }, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + const opt_sema = switch (strat) { + .sema => |sema| sema, + .eager => null, + .lazy => |arena| return AbiSizeAdvanced{ + .val = try Value.Tag.lazy_size.create(arena, ty), + }, + }; + const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); + const elem_bits = @intCast(u32, elem_bits_u64); + const total_bits = elem_bits * vector_type.len; + const total_bytes = (total_bits 
+ 7) / 8; + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| x, + .val => return AbiSizeAdvanced{ + .val = try Value.Tag.lazy_size.create(strat.lazy, ty), + }, + }; + const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + return AbiSizeAdvanced{ .scalar = result }; + }, + + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .bool, @@ -3014,7 +2519,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .single_const_pointer_to_comptime_int, .empty_struct_literal, .empty_struct, => return AbiSizeAdvanced{ .scalar = 0 }, @@ -3068,8 +2572,6 @@ pub const Type = struct { return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); }, - .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data }, - .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, .array => { const payload = ty.castTag(.array).?.data; switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { @@ -3093,47 +2595,7 @@ pub const Type = struct { } }, - .vector => { - const payload = ty.castTag(.vector).?.data; - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, - .lazy => |arena| return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(arena, ty), - }, - }; - const elem_bits = try payload.elem_type.bitSizeAdvanced(mod, opt_sema); - const total_bits = elem_bits * payload.len; - const total_bytes = (total_bits + 7) / 8; - const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| x, - .val => return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(strat.lazy, ty), - }, - }; - const result = std.mem.alignForwardGeneric(u64, total_bytes, alignment); - return AbiSizeAdvanced{ .scalar = result }; - }, - - .anyframe_T, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + .anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .pointer => switch (ty.castTag(.pointer).?.data.size) { .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, @@ -3141,7 +2603,6 @@ pub const Type = struct { }, // TODO revisit this when we have the concept of the error tag type - .anyerror_void_error_union, .error_set_inferred, .error_set, .error_set_merged, @@ -3149,8 +2610,7 @@ pub const Type = struct { => return AbiSizeAdvanced{ .scalar = 2 }, .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); + const child_type = ty.optionalChild(mod); if (child_type.isNoReturn()) { return AbiSizeAdvanced{ .scalar = 0 }; @@ -3272,8 +2732,12 @@ pub const Type = struct { .int_type => |int_type| return int_type.bits, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + const child_ty = vector_type.child.toType(); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + 
}, + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16 => return 16, @@ -3339,7 +2803,6 @@ pub const Type = struct { switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer - .single_const_pointer_to_comptime_int => unreachable, .empty_struct => unreachable, .empty_struct_literal => unreachable, .inferred_alloc_const => unreachable, @@ -3388,13 +2851,6 @@ pub const Type = struct { return size; }, - .vector => { - const payload = ty.castTag(.vector).?.data; - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return elem_bit_size * payload.len; - }, - .array_u8 => return 8 * ty.castTag(.array_u8).?.data, - .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1), .array => { const payload = ty.castTag(.array).?.data; const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); @@ -3415,43 +2871,13 @@ pub const Type = struct { .anyframe_T => return target.ptrBitWidth(), - .const_slice, - .mut_slice, - => return target.ptrBitWidth() * 2, - - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return target.ptrBitWidth() * 2, - - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - return target.ptrBitWidth(); - }, - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => { - return target.ptrBitWidth(); - }, - .pointer => switch (ty.castTag(.pointer).?.data.size) { .Slice => return target.ptrBitWidth() * 2, else => return target.ptrBitWidth(), }, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return target.ptrBitWidth(), - .error_set, .error_set_single, - .anyerror_void_error_union, .error_set_inferred, .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type @@ -3481,12 +2907,11 @@ pub const Type = struct { return true; }, .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return true; - return ty.childType().layoutIsResolved(mod); + if (ty.arrayLenIncludingSentinel(mod) == 0) return true; + return ty.childType(mod).layoutIsResolved(mod); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { @@ -3500,9 +2925,6 @@ pub const Type = struct { pub fn isSinglePointer(ty: Type, mod: *const Module) bool { switch (ty.ip_index) { .none => return switch (ty.tag()) { - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, .inferred_alloc_const, .inferred_alloc_mut, => true, @@ -3519,54 +2941,33 @@ pub const Type = struct { } /// Asserts `ty` is a pointer. - pub fn ptrSize(ty: Type) std.builtin.Type.Pointer.Size { - return ptrSizeOrNull(ty).?; + pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { + return ptrSizeOrNull(ty, mod).?; } /// Returns `null` if `ty` is not a pointer. 
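    /// A minimal usage sketch of the new Module-threaded form (illustrative
    /// only; assumes a `mod: *const Module` is in scope at the call site):
    ///
    ///     if (ty.ptrSizeOrNull(mod)) |size| switch (size) {
    ///         .Slice => {}, // fat pointer: ptr + len words
    ///         .One, .Many, .C => {}, // single-word pointer
    ///     };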
- pub fn ptrSizeOrNull(ty: Type) ?std.builtin.Type.Pointer.Size { - return switch (ty.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => .Slice, - - .many_const_pointer, - .many_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => .Many, - - .c_const_pointer, - .c_mut_pointer, - => .C, - - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, - .inferred_alloc_const, - .inferred_alloc_mut, - => .One, + pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .inferred_alloc_const, + .inferred_alloc_mut, + => .One, - .pointer => ty.castTag(.pointer).?.data.size, + .pointer => ty.castTag(.pointer).?.data.size, - else => null, + else => null, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_info| ptr_info.size, + else => null, + }, }; } pub fn isSlice(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => true, - .pointer => ty.castTag(.pointer).?.data.size == .Slice, - else => false, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -3583,78 +2984,28 @@ pub const Type = struct { pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type { switch (self.tag()) { - .const_slice_u8 => return Type.initTag(.manyptr_const_u8), - .const_slice_u8_sentinel_0 => return Type.initTag(.manyptr_const_u8_sentinel_0), - - .const_slice => { - const elem_type = self.castTag(.const_slice).?.data; - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_const_pointer }, - .data = elem_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - }, - .mut_slice => { - const elem_type = self.castTag(.mut_slice).?.data; - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = elem_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - }, - .pointer => { const payload = self.castTag(.pointer).?.data; assert(payload.size == .Slice); - if (payload.sentinel != null or - payload.@"align" != 0 or - payload.@"addrspace" != .generic or - payload.bit_offset != 0 or - payload.host_size != 0 or - payload.vector_index != .none or - payload.@"allowzero" or - payload.@"volatile") - { - buffer.* = .{ - .pointer = .{ - .data = .{ - .pointee_type = payload.pointee_type, - .sentinel = payload.sentinel, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = .Many, - }, - }, - }; - return Type.initPayload(&buffer.pointer.base); - } else if (payload.mutable) { - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = payload.pointee_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - } else { - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_const_pointer }, - .data = payload.pointee_type, + buffer.* = .{ + .pointer = .{ + .data = .{ + .pointee_type = payload.pointee_type, + .sentinel = payload.sentinel, + .@"align" = payload.@"align", + .@"addrspace" = payload.@"addrspace", + .bit_offset = payload.bit_offset, + .host_size = payload.host_size, + .vector_index = payload.vector_index, + .@"allowzero" = 
payload.@"allowzero", + .mutable = payload.mutable, + .@"volatile" = payload.@"volatile", + .size = .Many, }, - }; - return Type.initPayload(&buffer.elem_type.base); - } + }, + }; + return Type.initPayload(&buffer.pointer.base); }, else => unreachable, @@ -3663,19 +3014,7 @@ pub const Type = struct { pub fn isConstPtr(self: Type) bool { return switch (self.tag()) { - .single_const_pointer, - .many_const_pointer, - .c_const_pointer, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => true, - .pointer => !self.castTag(.pointer).?.data.mutable, - else => false, }; } @@ -3702,49 +3041,46 @@ pub const Type = struct { pub fn isCPtr(self: Type) bool { return switch (self.tag()) { - .c_const_pointer, - .c_mut_pointer, - => return true, - .pointer => self.castTag(.pointer).?.data.size == .C, else => return false, }; } - pub fn isPtrAtRuntime(self: Type, mod: *const Module) bool { - switch (self.tag()) { - .c_const_pointer, - .c_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .manyptr_u8, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .single_const_pointer, - .single_const_pointer_to_comptime_int, - .single_mut_pointer, - => return true, + pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => switch (ty.castTag(.pointer).?.data.size) { + .Slice => return false, + .One, .Many, .C => return true, + }, - .pointer => switch (self.castTag(.pointer).?.data.size) { - .Slice => return false, - .One, .Many, .C => return true, - }, + .optional => { + const child_type = ty.optionalChild(mod); + if (child_type.zigTypeTag(mod) != .Pointer) return false; + const info = child_type.ptrInfo(mod); + switch (info.size) { + .Slice, .C => return false, + .Many, .One => return !info.@"allowzero", + } + }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - if (child_type.zigTypeTag(mod) != .Pointer) return false; - const info = child_type.ptrInfo().data; - switch (info.size) { - .Slice, .C => return false, - .Many, .One => return !info.@"allowzero", - } + else => return false, + }, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => false, + .One, .Many, .C => true, + }, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| switch (p.size) { + .Slice, .C => false, + .Many, .One => !p.is_allowzero, + }, + else => false, + }, + else => false, }, - - else => return false, } } @@ -3754,23 +3090,17 @@ pub const Type = struct { if (ty.isPtrLikeOptional(mod)) { return true; } - return ty.ptrInfo().data.@"allowzero"; + return ty.ptrInfo(mod).@"allowzero"; } /// See also `isPtrLikeOptional`. 
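    /// Illustrative cases (a sketch, not exhaustive):
    ///
    ///     ?*u8            => true  (null is encoded as the zero address)
    ///     ?*allowzero u8  => false (zero is a valid payload pointer)
    ///     ?[*c]u8         => false (the C pointer already encodes null)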
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return true, - .optional => { const child_ty = ty.castTag(.optional).?.data; switch (child_ty.zigTypeTag(mod)) { .Pointer => { - const info = child_ty.ptrInfo().data; + const info = child_ty.ptrInfo(mod); switch (info.size) { .C => return false, .Slice, .Many, .One => return !info.@"allowzero", @@ -3793,7 +3123,7 @@ pub const Type = struct { pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.size == .C, - .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) { + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice, .C => false, .Many, .One => !ptr_type.is_allowzero, @@ -3803,16 +3133,10 @@ pub const Type = struct { else => false, }; switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return true, - .optional => { const child_ty = ty.castTag(.optional).?.data; if (child_ty.zigTypeTag(mod) != .Pointer) return false; - const info = child_ty.ptrInfo().data; + const info = child_ty.ptrInfo(mod); switch (info.size) { .Slice, .C => return false, .Many, .One => return !info.@"allowzero", @@ -3828,43 +3152,24 @@ pub const Type = struct { /// For *[N]T, returns [N]T. /// For *T, returns T. /// For [*]T, returns T. - pub fn childType(ty: Type) Type { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.elem_type, - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => ty.castPointer().?.data, - - .array_u8, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => Type.u8, - - .single_const_pointer_to_comptime_int => Type.comptime_int, - .pointer => ty.castTag(.pointer).?.data.pointee_type, + pub fn childType(ty: Type, mod: *const Module) Type { + return childTypeIp(ty, mod.intern_pool); + } - else => unreachable, + pub fn childTypeIp(ty: Type, ip: InternPool) Type { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array => ty.castTag(.array).?.data.elem_type, + .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, + + .pointer => ty.castTag(.pointer).?.data.pointee_type, + + else => unreachable, + }, + else => ip.childType(ty.ip_index).toType(), }; } - /// Asserts the type is a pointer or array type. - /// TODO this is deprecated in favor of `childType`. - pub const elemType = childType; - /// For *[N]T, returns T. /// For ?*T, returns T. /// For ?*[N]T, returns T. @@ -3875,54 +3180,42 @@ pub const Type = struct { /// For []T, returns T. /// For anyframe->T, returns T. 
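    /// For example (hypothetical call site, with `mod` threaded through as
    /// elsewhere in this change):
    ///
    ///     const elem = ptr_ty.elemType2(mod); // *[4]u8 -> u8, []f32 -> f32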
    pub fn elemType2(ty: Type, mod: *const Module) Type {
-        return switch (ty.tag()) {
-            .vector => ty.castTag(.vector).?.data.elem_type,
-            .array => ty.castTag(.array).?.data.elem_type,
-            .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
-            .many_const_pointer,
-            .many_mut_pointer,
-            .c_const_pointer,
-            .c_mut_pointer,
-            .const_slice,
-            .mut_slice,
-            => ty.castPointer().?.data,
-
-            .single_const_pointer,
-            .single_mut_pointer,
-            => ty.castPointer().?.data.shallowElemType(mod),
-
-            .array_u8,
-            .array_u8_sentinel_0,
-            .const_slice_u8,
-            .const_slice_u8_sentinel_0,
-            .manyptr_u8,
-            .manyptr_const_u8,
-            .manyptr_const_u8_sentinel_0,
-            => Type.u8,
-
-            .single_const_pointer_to_comptime_int => Type.comptime_int,
-            .pointer => {
-                const info = ty.castTag(.pointer).?.data;
-                const child_ty = info.pointee_type;
-                if (info.size == .One) {
-                    return child_ty.shallowElemType(mod);
-                } else {
-                    return child_ty;
-                }
-            },
-            .optional => ty.castTag(.optional).?.data.childType(),
-            .optional_single_mut_pointer => ty.castPointer().?.data,
-            .optional_single_const_pointer => ty.castPointer().?.data,
+        return switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .array => ty.castTag(.array).?.data.elem_type,
+                .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type,
+
+                .pointer => {
+                    const info = ty.castTag(.pointer).?.data;
+                    const child_ty = info.pointee_type;
+                    if (info.size == .One) {
+                        return child_ty.shallowElemType(mod);
+                    } else {
+                        return child_ty;
+                    }
+                },
+                .optional => ty.castTag(.optional).?.data.childType(mod),

-            .anyframe_T => ty.castTag(.anyframe_T).?.data,
+                .anyframe_T => ty.castTag(.anyframe_T).?.data,

-            else => unreachable,
+                else => unreachable,
+            },
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .ptr_type => |ptr_type| switch (ptr_type.size) {
+                    .One => ptr_type.elem_type.toType().shallowElemType(mod),
+                    .Many, .C, .Slice => ptr_type.elem_type.toType(),
+                },
+                .vector_type => |vector_type| vector_type.child.toType(),
+                .array_type => |array_type| array_type.child.toType(),
+                .opt_type => |child| mod.intern_pool.childType(child).toType(),
+                else => unreachable,
+            },
        };
    }

    fn shallowElemType(child_ty: Type, mod: *const Module) Type {
        return switch (child_ty.zigTypeTag(mod)) {
-            .Array, .Vector => child_ty.childType(),
+            .Array, .Vector => child_ty.childType(mod),
            else => child_ty,
        };
    }
@@ -3930,7 +3223,7 @@
    /// For vectors, returns the element type. Otherwise returns self.
    pub fn scalarType(ty: Type, mod: *const Module) Type {
        return switch (ty.zigTypeTag(mod)) {
-            .Vector => ty.childType(),
+            .Vector => ty.childType(mod),
            else => ty,
        };
    }

    /// Asserts that the type is an optional.
-    /// Resulting `Type` will have inner memory referencing `buf`.
    /// Note that for C pointers this returns the type unmodified.
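    /// For example (sketch): `?u32` yields `u32`, while `[*c]u8` is returned
    /// unmodified, since a C pointer acts as its own optional.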
- pub fn optionalChild(ty: Type, buf: *Payload.ElemType) Type { - return switch (ty.tag()) { - .optional => ty.castTag(.optional).?.data, - .optional_single_mut_pointer => { - buf.* = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty.castPointer().?.data, - }; - return Type.initPayload(&buf.base); - }, - .optional_single_const_pointer => { - buf.* = .{ - .base = .{ .tag = .single_const_pointer }, - .data = ty.castPointer().?.data, - }; - return Type.initPayload(&buf.base); - }, + pub fn optionalChild(ty: Type, mod: *const Module) Type { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .optional => ty.castTag(.optional).?.data, - .pointer, // here we assume it is a C pointer - .c_const_pointer, - .c_mut_pointer, - => return ty, + .pointer, // here we assume it is a C pointer + => return ty, - else => unreachable, - }; - } - - /// Asserts that the type is an optional. - /// Same as `optionalChild` but allocates the buffer if needed. - pub fn optionalChildAlloc(ty: Type, allocator: Allocator) !Type { - switch (ty.tag()) { - .optional => return ty.castTag(.optional).?.data, - .optional_single_mut_pointer => { - return Tag.single_mut_pointer.create(allocator, ty.castPointer().?.data); + else => unreachable, }, - .optional_single_const_pointer => { - return Tag.single_const_pointer.create(allocator, ty.castPointer().?.data); + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opt_type => |child| child.toType(), + .ptr_type => |ptr_type| b: { + assert(ptr_type.size == .C); + break :b ty; + }, + else => unreachable, }, - .pointer, // here we assume it is a C pointer - .c_const_pointer, - .c_mut_pointer, - => return ty, - - else => unreachable, - } + }; } /// Returns the tag type of a union, if the type is a union and it has a tag type. @@ -4071,19 +3338,25 @@ pub const Type = struct { } /// Asserts that the type is an error union. - pub fn errorUnionPayload(self: Type) Type { - return switch (self.tag()) { - .anyerror_void_error_union => Type.void, - .error_union => self.castTag(.error_union).?.data.payload, - else => unreachable, + pub fn errorUnionPayload(ty: Type) Type { + return switch (ty.ip_index) { + .anyerror_void_error_union_type => Type.void, + .none => switch (ty.tag()) { + .error_union => ty.castTag(.error_union).?.data.payload, + else => unreachable, + }, + else => @panic("TODO"), }; } - pub fn errorUnionSet(self: Type) Type { - return switch (self.tag()) { - .anyerror_void_error_union => Type.anyerror, - .error_union => self.castTag(.error_union).?.data.error_set, - else => unreachable, + pub fn errorUnionSet(ty: Type) Type { + return switch (ty.ip_index) { + .anyerror_void_error_union_type => Type.anyerror, + .none => switch (ty.tag()) { + .error_union => ty.castTag(.error_union).?.data.error_set, + else => unreachable, + }, + else => @panic("TODO"), }; } @@ -4168,67 +3441,73 @@ pub const Type = struct { } /// Asserts the type is an array or vector or struct. 
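    /// For example (sketch): `[4]u8` and `@Vector(4, u8)` both report a
    /// length of 4; for a struct or tuple this is the field count.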
- pub fn arrayLen(ty: Type) u64 { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.len, - .array => ty.castTag(.array).?.data.len, - .array_sentinel => ty.castTag(.array_sentinel).?.data.len, - .array_u8 => ty.castTag(.array_u8).?.data, - .array_u8_sentinel_0 => ty.castTag(.array_u8_sentinel_0).?.data, - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), - .empty_struct, .empty_struct_literal => 0, + pub fn arrayLen(ty: Type, mod: *const Module) u64 { + return arrayLenIp(ty, mod.intern_pool); + } - else => unreachable, + pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array => ty.castTag(.array).?.data.len, + .array_sentinel => ty.castTag(.array_sentinel).?.data.len, + .tuple => ty.castTag(.tuple).?.data.types.len, + .anon_struct => ty.castTag(.anon_struct).?.data.types.len, + .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), + .empty_struct, .empty_struct_literal => 0, + + else => unreachable, + }, + else => switch (ip.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + .array_type => |array_type| array_type.len, + else => unreachable, + }, }; } - pub fn arrayLenIncludingSentinel(ty: Type) u64 { - return ty.arrayLen() + @boolToInt(ty.sentinel() != null); + pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { + return ty.arrayLen(mod) + @boolToInt(ty.sentinel(mod) != null); } - pub fn vectorLen(ty: Type) u32 { - return switch (ty.tag()) { - .vector => @intCast(u32, ty.castTag(.vector).?.data.len), - .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), - .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), - else => unreachable, + pub fn vectorLen(ty: Type, mod: *const Module) u32 { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), + .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }, }; } /// Asserts the type is an array, pointer or vector. 
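    /// For example (sketch): `[:0]const u8` and `[4:0]u8` yield a zero value,
    /// while `[]const u8` and `[4]u8` yield null.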
- pub fn sentinel(self: Type) ?Value { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, - .vector, - .array, - .array_u8, - .manyptr_u8, - .manyptr_const_u8, - .const_slice_u8, - .const_slice, - .mut_slice, - .tuple, - .empty_struct_literal, - .@"struct", - => return null, + pub fn sentinel(ty: Type, mod: *const Module) ?Value { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array, + .tuple, + .empty_struct_literal, + .@"struct", + => null, - .pointer => return self.castTag(.pointer).?.data.sentinel, - .array_sentinel => return self.castTag(.array_sentinel).?.data.sentinel, + .pointer => ty.castTag(.pointer).?.data.sentinel, + .array_sentinel => ty.castTag(.array_sentinel).?.data.sentinel, - .array_u8_sentinel_0, - .const_slice_u8_sentinel_0, - .manyptr_const_u8_sentinel_0, - => return Value.zero, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type, + .struct_type, + => null, - else => unreachable, + .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + + else => unreachable, + }, }; } @@ -4292,8 +3571,6 @@ pub const Type = struct { return .{ .signedness = .unsigned, .bits = 16 }; }, - .vector => ty = ty.castTag(.vector).?.data.elem_type, - .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout == .Packed); @@ -4321,8 +3598,9 @@ pub const Type = struct { .int_type => |int_type| return int_type, .ptr_type => unreachable, .array_type => unreachable, - .vector_type => @panic("TODO"), - .optional_type => unreachable, + .vector_type => |vector_type| ty = vector_type.child.toType(), + + .opt_type => unreachable, .error_union_type => unreachable, .simple_type => unreachable, // handled via Index enum tag above .struct_type => @panic("TODO"), @@ -4426,7 +3704,11 @@ pub const Type = struct { /// Asserts the type is a function or a function pointer. 
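    /// For example (sketch): both `fn () u32` and `*const fn () u32` yield
    /// `u32` here.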
pub fn fnReturnType(ty: Type) Type { - const fn_ty = if (ty.castPointer()) |p| p.data else ty; + const fn_ty = switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.pointee_type, + .function => ty, + else => unreachable, + }; return fn_ty.castTag(.function).?.data.return_type; } @@ -4516,8 +3798,12 @@ pub const Type = struct { }, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + if (vector_type.len == 0) return Value.initTag(.empty_array); + if (vector_type.child.toType().onePossibleValue(mod)) |v| return v; + return null; + }, + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -4580,34 +3866,15 @@ pub const Type = struct { .error_set, .error_set_merged, .function, - .single_const_pointer_to_comptime_int, .array_sentinel, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .anyerror_void_error_union, .error_set_inferred, .@"opaque", - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .anyframe_T, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer, - .single_mut_pointer, .pointer, => return null, .optional => { - var buf: Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { return Value.null; } else { @@ -4690,10 +3957,10 @@ pub const Type = struct { .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .vector, .array, .array_u8 => { - if (ty.arrayLen() == 0) + .array => { + if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); - if (ty.elemType().onePossibleValue(mod) != null) + if (ty.childType(mod).onePossibleValue(mod) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -4711,9 +3978,9 @@ pub const Type = struct { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .array_type => |array_type| return array_type.child.toType().comptimeOnly(mod), + .vector_type => |vector_type| return vector_type.child.toType().comptimeOnly(mod), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -4772,12 +4039,6 @@ pub const Type = struct { }; return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, .empty_struct_literal, .empty_struct, .error_set, @@ -4785,35 +4046,21 @@ pub const Type = struct { .error_set_inferred, .error_set_merged, .@"opaque", - .array_u8, - .array_u8_sentinel_0, .enum_simple, => false, - .single_const_pointer_to_comptime_int, // These are function bodies, not function pointers. 
-            .function,
-            => true,
+            .function => true,

            .inferred_alloc_mut => unreachable,
            .inferred_alloc_const => unreachable,

            .array,
            .array_sentinel,
-            .vector,
-            => return ty.childType().comptimeOnly(mod),
+            => return ty.childType(mod).comptimeOnly(mod),

-            .pointer,
-            .single_const_pointer,
-            .single_mut_pointer,
-            .many_const_pointer,
-            .many_mut_pointer,
-            .c_const_pointer,
-            .c_mut_pointer,
-            .const_slice,
-            .mut_slice,
-            => {
-                const child_ty = ty.childType();
+            .pointer => {
+                const child_ty = ty.childType(mod);
                if (child_ty.zigTypeTag(mod) == .Fn) {
                    return false;
                } else {
@@ -4821,12 +4068,8 @@
                }
            },

-            .optional,
-            .optional_single_mut_pointer,
-            .optional_single_const_pointer,
-            => {
-                var buf: Type.Payload.ElemType = undefined;
-                return ty.optionalChild(&buf).comptimeOnly(mod);
+            .optional => {
+                return ty.optionalChild(mod).comptimeOnly(mod);
            },

            .tuple, .anon_struct => {
@@ -4882,6 +4125,10 @@
        };
    }

+    pub fn isVector(ty: Type, mod: *const Module) bool {
+        return ty.zigTypeTag(mod) == .Vector;
+    }
+
    pub fn isArrayOrVector(ty: Type, mod: *const Module) bool {
        return switch (ty.zigTypeTag(mod)) {
            .Array, .Vector => true,
@@ -4892,9 +4139,9 @@
    pub fn isIndexable(ty: Type, mod: *const Module) bool {
        return switch (ty.zigTypeTag(mod)) {
            .Array, .Vector => true,
-            .Pointer => switch (ty.ptrSize()) {
+            .Pointer => switch (ty.ptrSize(mod)) {
                .Slice, .Many, .C => true,
-                .One => ty.elemType().zigTypeTag(mod) == .Array,
+                .One => ty.childType(mod).zigTypeTag(mod) == .Array,
            },
            .Struct => ty.isTuple(),
            else => false,
@@ -4904,10 +4151,10 @@
    pub fn indexableHasLen(ty: Type, mod: *const Module) bool {
        return switch (ty.zigTypeTag(mod)) {
            .Array, .Vector => true,
-            .Pointer => switch (ty.ptrSize()) {
+            .Pointer => switch (ty.ptrSize(mod)) {
                .Many, .C => false,
                .Slice => true,
-                .One => ty.elemType().zigTypeTag(mod) == .Array,
+                .One => ty.childType(mod).zigTypeTag(mod) == .Array,
            },
            .Struct => ty.isTuple(),
            else => false,
@@ -5527,14 +4774,6 @@
    /// with different enum tags, because the former requires more payload data than the latter.
    /// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`.
    pub const Tag = enum(usize) {
-        // The first section of this enum are tags that require no payload.
-        manyptr_u8,
-        manyptr_const_u8,
-        manyptr_const_u8_sentinel_0,
-        single_const_pointer_to_comptime_int,
-        const_slice_u8,
-        const_slice_u8_sentinel_0,
-        anyerror_void_error_union,
        /// Same as `empty_struct` except it has an empty namespace.
        empty_struct_literal,
        /// This is a special value that tracks a set of types that have been stored
        /// to an inferred allocation. It does not support most of the normal type queries.
        /// However it does respond to `isConstPtr`, `ptrSize`, `zigTypeTag`, etc.
        inferred_alloc_mut,
        /// Same as `inferred_alloc_mut` but the local is `const` not `var`.
        inferred_alloc_const, // See last_no_payload_tag below.
        // After this, the tag requires a payload.
- array_u8, - array_u8_sentinel_0, array, array_sentinel, - vector, /// Possible Value tags for this: @"struct" tuple, /// Possible Value tags for this: @"struct" anon_struct, pointer, - single_const_pointer, - single_mut_pointer, - many_const_pointer, - many_mut_pointer, - c_const_pointer, - c_mut_pointer, - const_slice, - mut_slice, function, optional, - optional_single_mut_pointer, - optional_single_const_pointer, error_union, anyframe_T, error_set, @@ -5590,33 +4816,12 @@ pub const Type = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .single_const_pointer_to_comptime_int, - .anyerror_void_error_union, - .const_slice_u8, - .const_slice_u8_sentinel_0, .inferred_alloc_const, .inferred_alloc_mut, .empty_struct_literal, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), - .array_u8, - .array_u8_sentinel_0, - => Payload.Len, - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, .anyframe_T, => Payload.ElemType, @@ -5624,7 +4829,7 @@ pub const Type = struct { .error_set_inferred => Payload.ErrorSetInferred, .error_set_merged => Payload.ErrorSetMerged, - .array, .vector => Payload.Array, + .array => Payload.Array, .array_sentinel => Payload.ArraySentinel, .pointer => Payload.Pointer, .function => Payload.Function, @@ -5847,15 +5052,28 @@ pub const Type = struct { @"volatile": bool = false, size: std.builtin.Type.Pointer.Size = .One, - pub const VectorIndex = enum(u32) { - none = std.math.maxInt(u32), - runtime = std.math.maxInt(u32) - 1, - _, - }; + pub const VectorIndex = InternPool.Key.PtrType.VectorIndex; + pub fn alignment(data: Data, mod: *const Module) u32 { if (data.@"align" != 0) return data.@"align"; return abiAlignment(data.pointee_type, mod); } + + pub fn fromKey(p: InternPool.Key.PtrType) Data { + return .{ + .pointee_type = p.elem_type.toType(), + .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null, + .@"align" = p.alignment, + .@"addrspace" = p.address_space, + .bit_offset = p.bit_offset, + .host_size = p.host_size, + .vector_index = p.vector_index, + .@"allowzero" = p.is_allowzero, + .mutable = !p.is_const, + .@"volatile" = p.is_volatile, + .size = p.size, + }; + } }; }; @@ -5986,6 +5204,17 @@ pub const Type = struct { pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; + pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type, .legacy = undefined }; + pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type, .legacy = undefined }; + pub const single_const_pointer_to_comptime_int: Type = .{ + .ip_index = .single_const_pointer_to_comptime_int_type, + .legacy = undefined, + }; + pub const const_slice_u8_sentinel_0: Type = .{ + .ip_index = .const_slice_u8_sentinel_0_type, + .legacy = undefined, + }; + pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined }; pub const err_int = Type.u16; @@ -6019,50 +5248,6 @@ pub const Type = struct { } } - if (d.@"align" == 0 and d.@"addrspace" == .generic and - d.bit_offset == 0 and d.host_size == 0 and d.vector_index == .none and - !d.@"allowzero" and !d.@"volatile") - { - if (d.sentinel) |sent| { - if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { - switch 
(d.size) { - .Slice => { - if (sent.compareAllWithZero(.eq, mod)) { - return Type.initTag(.const_slice_u8_sentinel_0); - } - }, - .Many => { - if (sent.compareAllWithZero(.eq, mod)) { - return Type.initTag(.manyptr_const_u8_sentinel_0); - } - }, - else => {}, - } - } - } else if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { - switch (d.size) { - .Slice => return Type.initTag(.const_slice_u8), - .Many => return Type.initTag(.manyptr_const_u8), - else => {}, - } - } else { - const T = Type.Tag; - const type_payload = try arena.create(Type.Payload.ElemType); - type_payload.* = .{ - .base = .{ - .tag = switch (d.size) { - .One => if (d.mutable) T.single_mut_pointer else T.single_const_pointer, - .Many => if (d.mutable) T.many_mut_pointer else T.many_const_pointer, - .C => if (d.mutable) T.c_mut_pointer else T.c_const_pointer, - .Slice => if (d.mutable) T.mut_slice else T.const_slice, - }, - }, - .data = d.pointee_type, - }; - return Type.initPayload(&type_payload.base); - } - } - return Type.Tag.pointer.create(arena, d); } @@ -6073,13 +5258,21 @@ pub const Type = struct { elem_type: Type, mod: *Module, ) Allocator.Error!Type { - if (elem_type.eql(Type.u8, mod)) { - if (sent) |some| { - if (some.eql(Value.zero, elem_type, mod)) { - return Tag.array_u8_sentinel_0.create(arena, len); + if (elem_type.ip_index != .none) { + if (sent) |s| { + if (s.ip_index != .none) { + return mod.arrayType(.{ + .len = len, + .child = elem_type.ip_index, + .sentinel = s.ip_index, + }); } } else { - return Tag.array_u8.create(arena, len); + return mod.arrayType(.{ + .len = len, + .child = elem_type.ip_index, + .sentinel = .none, + }); } } @@ -6097,24 +5290,11 @@ pub const Type = struct { }); } - pub fn vector(arena: Allocator, len: u64, elem_type: Type) Allocator.Error!Type { - return Tag.vector.create(arena, .{ - .len = len, - .elem_type = elem_type, - }); - } - - pub fn optional(arena: Allocator, child_type: Type) Allocator.Error!Type { - switch (child_type.tag()) { - .single_const_pointer => return Type.Tag.optional_single_const_pointer.create( - arena, - child_type.elemType(), - ), - .single_mut_pointer => return Type.Tag.optional_single_mut_pointer.create( - arena, - child_type.elemType(), - ), - else => return Type.Tag.optional.create(arena, child_type), + pub fn optional(arena: Allocator, child_type: Type, mod: *Module) Allocator.Error!Type { + if (child_type.ip_index != .none) { + return mod.optionalType(child_type.ip_index); + } else { + return Type.Tag.optional.create(arena, child_type); } } @@ -6125,12 +5305,6 @@ pub const Type = struct { mod: *Module, ) Allocator.Error!Type { assert(error_set.zigTypeTag(mod) == .ErrorSet); - if (error_set.eql(Type.anyerror, mod) and - payload.eql(Type.void, mod)) - { - return Type.initTag(.anyerror_void_error_union); - } - return Type.Tag.error_union.create(arena, .{ .error_set = error_set, .payload = payload, diff --git a/src/value.zig b/src/value.zig index cbf18c672c43..6f7210c884cc 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,14 +33,6 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. 
- manyptr_u8_type, - manyptr_const_u8_type, - manyptr_const_u8_sentinel_0_type, - single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - const_slice_u8_sentinel_0_type, - anyerror_void_error_union_type, - undef, zero, one, @@ -140,11 +132,6 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, - .anyerror_void_error_union_type, - .undef, .zero, .one, @@ -153,9 +140,6 @@ pub const Value = struct { .empty_struct_value, .empty_array, .null_value, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), .int_big_positive, @@ -280,9 +264,7 @@ pub const Value = struct { } pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { - if (self.ip_index != .none) { - return null; - } + assert(self.ip_index == .none); if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; @@ -305,11 +287,6 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, - .anyerror_void_error_union_type, - .undef, .zero, .one, @@ -318,9 +295,6 @@ pub const Value = struct { .empty_array, .null_value, .empty_struct_value, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, => unreachable, .ty, .lazy_align, .lazy_size => { @@ -553,14 +527,6 @@ pub const Value = struct { } var val = start_val; while (true) switch (val.tag()) { - .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"), - .const_slice_u8_type => return out_stream.writeAll("[]const u8"), - .const_slice_u8_sentinel_0_type => return out_stream.writeAll("[:0]const u8"), - .anyerror_void_error_union_type => return out_stream.writeAll("anyerror!void"), - .manyptr_u8_type => return out_stream.writeAll("[*]u8"), - .manyptr_const_u8_type => return out_stream.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0_type => return out_stream.writeAll("[*:0]const u8"), - .empty_struct_value => return out_stream.writeAll("struct {}{}"), .aggregate => { return out_stream.writeAll("(aggregate)"); @@ -674,7 +640,7 @@ pub const Value = struct { switch (val.tag()) { .bytes => { const bytes = val.castTag(.bytes).?.data; - const adjusted_len = bytes.len - @boolToInt(ty.sentinel() != null); + const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null); const adjusted_bytes = bytes[0..adjusted_len]; return allocator.dupe(u8, adjusted_bytes); }, @@ -686,7 +652,7 @@ pub const Value = struct { .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), .repeated => { const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen())); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); @memset(result, byte); return result; }, @@ -701,7 +667,7 @@ pub const Value = struct { const slice = val.castTag(.slice).?.data; return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod), + else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), } } @@ -720,13 +686,6 @@ pub const Value = struct { if (self.ip_index 
!= .none) return self.ip_index.toType(); return switch (self.tag()) { .ty => self.castTag(.ty).?.data, - .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), - .const_slice_u8_type => Type.initTag(.const_slice_u8), - .const_slice_u8_sentinel_0_type => Type.initTag(.const_slice_u8_sentinel_0), - .anyerror_void_error_union_type => Type.initTag(.anyerror_void_error_union), - .manyptr_u8_type => Type.initTag(.manyptr_u8), - .manyptr_const_u8_type => Type.initTag(.manyptr_const_u8), - .manyptr_const_u8_sentinel_0_type => Type.initTag(.manyptr_const_u8_sentinel_0), else => unreachable, }; @@ -1096,8 +1055,8 @@ pub const Value = struct { else => unreachable, }, .Array => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); const elem_size = @intCast(usize, elem_ty.abiSize(mod)); var elem_i: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; @@ -1150,8 +1109,7 @@ pub const Value = struct { }, .Optional => { if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout; - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToMemory(child, mod, buffer); @@ -1220,9 +1178,9 @@ pub const Value = struct { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); - const len = @intCast(usize, ty.arrayLen()); + const len = @intCast(usize, ty.arrayLen(mod)); var bits: u16 = 0; var elem_i: usize = 0; @@ -1267,8 +1225,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToPackedMemory(child, mod, buffer, bit_offset); @@ -1335,9 +1292,9 @@ pub const Value = struct { else => unreachable, }, .Array => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); + const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); var offset: usize = 0; for (elems) |*elem| { elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena); @@ -1386,8 +1343,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); return readFromMemory(child, mod, buffer, arena); }, else => @panic("TODO implement readFromMemory for more types"), @@ -1449,8 +1405,8 @@ pub const Value = struct { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); + const elem_ty = ty.childType(mod); + const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); var bits: u16 = 0; const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); @@ -1483,8 +1439,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); return readFromPackedMemory(child, mod, buffer, bit_offset, arena); }, else => 
@panic("TODO implement readFromPackedMemory for more types"), @@ -1956,7 +1911,7 @@ pub const Value = struct { pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool { if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < ty.vectorLen()) : (i += 1) { + while (i < ty.vectorLen(mod)) : (i += 1) { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); @@ -2092,8 +2047,7 @@ pub const Value = struct { .opt_payload => { const a_payload = a.castTag(.opt_payload).?.data; const b_payload = b.castTag(.opt_payload).?.data; - var buffer: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buffer); + const payload_ty = ty.optionalChild(mod); return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); }, .slice => { @@ -2175,7 +2129,7 @@ pub const Value = struct { return true; } - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); for (a_field_vals, 0..) |a_elem, i| { const b_elem = b_field_vals[i]; @@ -2239,8 +2193,8 @@ pub const Value = struct { return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var i: usize = 0; var a_buf: ElemValueBuffer = undefined; var b_buf: ElemValueBuffer = undefined; @@ -2253,11 +2207,11 @@ pub const Value = struct { } return true; }, - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice => { - const a_len = switch (a_ty.ptrSize()) { + const a_len = switch (a_ty.ptrSize(mod)) { .Slice => a.sliceLen(mod), - .One => a_ty.childType().arrayLen(), + .One => a_ty.childType(mod).arrayLen(mod), else => unreachable, }; if (a_len != b.sliceLen(mod)) { @@ -2266,7 +2220,7 @@ pub const Value = struct { var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = ty.slicePtrFieldType(&ptr_buf); - const a_ptr = switch (a_ty.ptrSize()) { + const a_ptr = switch (a_ty.ptrSize(mod)) { .Slice => a.slicePtr(), .One => a, else => unreachable, @@ -2412,8 +2366,8 @@ pub const Value = struct { else => return hashPtr(val, hasher, mod), }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var index: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; while (index < len) : (index += 1) { @@ -2438,8 +2392,7 @@ pub const Value = struct { if (val.castTag(.opt_payload)) |payload| { std.hash.autoHash(hasher, true); // non-null const sub_val = payload.data; - var buffer: Type.Payload.ElemType = undefined; - const sub_ty = ty.optionalChild(&buffer); + const sub_ty = ty.optionalChild(mod); sub_val.hash(sub_ty, hasher, mod); } else { std.hash.autoHash(hasher, false); // null @@ -2534,8 +2487,8 @@ pub const Value = struct { else => val.hashPtr(hasher, mod), }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var index: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; while (index < len) : (index += 1) { @@ -2544,8 +2497,7 @@ pub const Value = struct { } }, .Optional => if (val.castTag(.opt_payload)) |payload| { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); 
payload.data.hashUncoerced(child_ty, hasher, mod); } else std.hash.autoHash(hasher, std.builtin.TypeId.Null), .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else { @@ -2720,7 +2672,7 @@ pub const Value = struct { const decl_index = val.castTag(.decl_ref).?.data; const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(); + return decl.ty.arrayLen(mod); } else { return 1; } @@ -2729,7 +2681,7 @@ pub const Value = struct { const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(); + return decl.ty.arrayLen(mod); } else { return 1; } @@ -2737,7 +2689,7 @@ pub const Value = struct { .comptime_field_ptr => { const payload = val.castTag(.comptime_field_ptr).?.data; if (payload.field_ty.zigTypeTag(mod) == .Array) { - return payload.field_ty.arrayLen(); + return payload.field_ty.arrayLen(mod); } else { return 1; } @@ -3137,7 +3089,7 @@ pub const Value = struct { pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, int_ty.vectorLen()); + const result_data = try arena.alloc(Value, int_ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -3250,7 +3202,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3298,7 +3250,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3345,8 +3297,8 @@ pub const Value = struct { mod: *Module, ) !OverflowArithmeticResult { if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try arena.alloc(Value, ty.vectorLen()); - const result_data = try arena.alloc(Value, ty.vectorLen()); + const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3408,7 +3360,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3452,7 +3404,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3527,7 +3479,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -3565,7 +3517,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3601,7 +3553,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3631,7 +3583,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3666,7 +3618,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3701,7 +3653,7 @@ pub const Value = struct { pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3741,7 +3693,7 @@ pub const Value = struct { pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
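// Aside: every vector-aware operation in this file shares one skeleton; a
// sketch of it, with `scalarOp` standing in for the per-element work (a
// hypothetical name, not a function in this file). The aggregate wrap-up at
// the end is assumed from the rest of value.zig; only the allocation and
// loop heads are visible in these hunks.
//
//     if (ty.zigTypeTag(mod) == .Vector) {
//         const result_data = try arena.alloc(Value, ty.vectorLen(mod));
//         for (result_data, 0..) |*scalar, i| {
//             var lhs_buf: Value.ElemValueBuffer = undefined;
//             var rhs_buf: Value.ElemValueBuffer = undefined;
//             const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
//             const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
//             scalar.* = try scalarOp(lhs_elem, rhs_elem, ty.childType(mod), arena, mod);
//         }
//         return Value.Tag.aggregate.create(arena, result_data);
//     }
//     return scalarOp(lhs, rhs, ty, arena, mod);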
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3781,7 +3733,7 @@ pub const Value = struct { pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3857,7 +3809,7 @@ pub const Value = struct { pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3904,7 +3856,7 @@ pub const Value = struct { pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3950,7 +3902,7 @@ pub const Value = struct { pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3986,7 +3938,7 @@ pub const Value = struct { pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4007,7 +3959,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4038,7 +3990,7 @@ pub const Value = struct { pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4078,8 +4030,8 @@ pub const Value = struct { mod: *Module, ) !OverflowArithmeticResult { if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try allocator.alloc(Value, ty.vectorLen()); - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4136,7 +4088,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4184,7 +4136,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4212,7 +4164,7 @@ pub const Value = struct { pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4264,7 +4216,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4300,7 +4252,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4359,7 +4311,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4418,7 +4370,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4477,7 +4429,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4530,7 +4482,7 @@ pub const Value = struct { pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4570,7 +4522,7 @@ pub const Value = struct { pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4610,7 +4562,7 @@ pub const Value = struct { pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4650,7 +4602,7 @@ pub const Value = struct { pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4690,7 +4642,7 @@ pub const Value = struct { pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4730,7 +4682,7 @@ pub const Value = struct { pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4770,7 +4722,7 @@ pub const Value = struct { pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4810,7 +4762,7 @@ pub const Value = struct { pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4850,7 +4802,7 @@ pub const Value = struct { pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4890,7 +4842,7 @@ pub const Value = struct { pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4930,7 +4882,7 @@ pub const Value = struct { pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4970,7 +4922,7 @@ pub const Value = struct { pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -5010,7 +4962,7 @@ pub const Value = struct { pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -5050,7 +5002,7 @@ pub const Value = struct { pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -5097,7 +5049,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var mulend1_buf: Value.ElemValueBuffer = undefined; const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf); From 773fabf3610629c8974b59ed6fbd27050b7e505b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 May 2023 20:39:32 -0700 Subject: [PATCH 020/205] InternPool: add the missing pointer data --- src/InternPool.zig | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 295a694e2aab..7328c74b4f4b 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -653,7 +653,6 @@ pub const Tag = enum(u8) { /// data is payload to Vector. type_vector, /// A fully explicitly specified pointer type. - /// TODO actually this is missing some stuff like bit_offset /// data is payload to Pointer. type_pointer, /// An optional type. @@ -793,6 +792,8 @@ pub const Pointer = struct { child: Index, sentinel: Index, flags: Flags, + packed_offset: PackedOffset, + vector_index: VectorIndex, pub const Flags = packed struct(u32) { alignment: u16, @@ -804,8 +805,14 @@ pub const Pointer = struct { _: u7 = undefined, }; + pub const PackedOffset = packed struct(u32) { + host_size: u16, + bit_offset: u16, + }; + pub const Size = std.builtin.Type.Pointer.Size; pub const AddressSpace = std.builtin.AddressSpace; + pub const VectorIndex = Key.PtrType.VectorIndex; }; /// Used for non-sentineled arrays that have length fitting in u32, as well as @@ -914,6 +921,9 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, + .vector_index = ptr_info.vector_index, + .host_size = ptr_info.packed_offset.host_size, + .bit_offset = ptr_info.packed_offset.bit_offset, } }; }, @@ -972,6 +982,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .size = ptr_type.size, .address_space = ptr_type.address_space, }, + .packed_offset = .{ + .host_size = ptr_type.host_size, + .bit_offset = ptr_type.bit_offset, + }, + .vector_index = ptr_type.vector_index, }), }); }, @@ -1126,6 +1141,8 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { Index => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), + Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), + Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), }); } @@ -1180,6 +1197,8 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T 
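// Aside: the new 32-bit payload fields round-trip through the `extra` array
// via plain bit-casts, exactly as wired into addExtraAssumeCapacity and
// extraData above. A self-contained sketch of that encoding (toy struct name,
// same layout idea as Pointer.PackedOffset; builtins in the pre-0.11
// two-argument spelling this patch series uses):
const expectEqual = @import("std").testing.expectEqual;

const DemoPackedOffset = packed struct(u32) {
    host_size: u16,
    bit_offset: u16,
};

test "packed pointer data round-trips through a u32 extra word" {
    const po = DemoPackedOffset{ .host_size = 4, .bit_offset = 9 };
    const word = @bitCast(u32, po); // the addExtraAssumeCapacity direction
    const back = @bitCast(DemoPackedOffset, word); // the extraData direction
    try expectEqual(po, back);
}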
{ Index => @intToEnum(Index, ip.extra.items[i]), i32 => @bitCast(i32, ip.extra.items[i]), Pointer.Flags => @bitCast(Pointer.Flags, ip.extra.items[i]), + Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, ip.extra.items[i]), + Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, ip.extra.items[i]), else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; From 6ab8b6f8b273356ce248a075b6a0657bfea33c79 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 May 2023 21:40:35 -0700 Subject: [PATCH 021/205] stage2: move undef, unreach, null values to InternPool --- src/Module.zig | 8 +- src/Sema.zig | 1102 +++++++++++++++++++------------------ src/TypedValue.zig | 575 +++++++++---------- src/arch/wasm/CodeGen.zig | 14 +- src/codegen.zig | 150 ++--- src/codegen/c.zig | 242 ++++---- src/codegen/llvm.zig | 119 ++-- src/codegen/spirv.zig | 11 +- src/type.zig | 58 +- src/value.zig | 494 +++++++++-------- 10 files changed, 1440 insertions(+), 1333 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 67ca91266c3e..b1cbd8829726 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -932,7 +932,7 @@ pub const Decl = struct { assert(decl.has_tv); return switch (decl.val.tag()) { .extern_fn => true, - .variable => decl.val.castTag(.variable).?.data.init.tag() == .unreachable_value, + .variable => decl.val.castTag(.variable).?.data.init.ip_index == .unreachable_value, else => false, }; } @@ -4849,6 +4849,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var is_extern = false; switch (decl_tv.val.ip_index) { .generic_poison => unreachable, + .unreachable_value => unreachable, + .none => switch (decl_tv.val.tag()) { .variable => { const variable = decl_tv.val.castTag(.variable).?.data; @@ -4869,8 +4871,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { } }, - .unreachable_value => unreachable, - .function => {}, else => { @@ -6592,7 +6592,7 @@ pub fn populateTestFunctions( .len = try Value.Tag.int_u64.create(arena, test_name_slice.len), }), // name try Value.Tag.decl_ref.create(arena, test_decl_index), // func - Value.initTag(.null_value), // async_frame_size + Value.null, // async_frame_size }; test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals); } diff --git a/src/Sema.zig b/src/Sema.zig index 87df2f23e1ab..3406d0d80f52 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1877,8 +1877,8 @@ fn resolveConstValue( if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { switch (val.ip_index) { .generic_poison => return error.GenericPoison, + .undef => return sema.failWithUseOfUndef(block, src), .none => switch (val.tag()) { - .undef => return sema.failWithUseOfUndef(block, src), .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, }, @@ -4409,7 +4409,7 @@ fn validateStructInit( if (field_ptr != 0) continue; const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { + if (default_val.ip_index == .unreachable_value) { if (struct_ty.isTuple()) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4554,7 +4554,7 @@ fn validateStructInit( } const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { + if (default_val.ip_index == .unreachable_value) { if (struct_ty.isTuple()) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4644,7 +4644,7 @@ fn zirValidateArrayInit( var i = instrs.len; while (i < array_len) : (i += 1) { const default_val = 
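// Aside: the shape of this whole commit in one sketch. Values interned in the
// InternPool are recognized by `ip_index` first; only legacy values, with
// ip_index == .none, still dispatch on `tag()`:
//
//     switch (val.ip_index) {
//         .undef => ...,                // interned special cases: no tag() call
//         .unreachable_value => ...,
//         .none => switch (val.tag()) { // only legacy payloads carry a tag
//             .variable => ...,
//             else => ...,
//         },
//         else => ...,
//     }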
array_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { + if (default_val.ip_index == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -7885,7 +7885,7 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) const tuple = ty.tupleFields(); for (tuple.values, 0..) |field_val, i| { try sema.resolveTupleLazyValues(block, src, tuple.types[i]); - if (field_val.tag() == .unreachable_value) continue; + if (field_val.ip_index == .unreachable_value) continue; try sema.resolveLazyValue(field_val); } } @@ -12641,7 +12641,7 @@ fn analyzeTupleCat( const default_val = lhs_ty.structFieldDefaultValue(i); values[i] = default_val; const operand_src = lhs_src; // TODO better source location - if (default_val.tag() == .unreachable_value) { + if (default_val.ip_index == .unreachable_value) { runtime_src = operand_src; } } @@ -12651,7 +12651,7 @@ fn analyzeTupleCat( const default_val = rhs_ty.structFieldDefaultValue(i); values[i + lhs_len] = default_val; const operand_src = rhs_src; // TODO better source location - if (default_val.tag() == .unreachable_value) { + if (default_val.ip_index == .unreachable_value) { runtime_src = operand_src; } } @@ -12809,8 +12809,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai while (elem_i < lhs_len) : (elem_i += 1) { const lhs_elem_i = elem_i; const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type; - const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.initTag(.unreachable_value); - const elem_val = if (elem_default_val.tag() == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val; + const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.@"unreachable"; + const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); @@ -12819,8 +12819,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type; - const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.initTag(.unreachable_value); - const elem_val = if (elem_default_val.tag() == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val; + const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.@"unreachable"; + const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); @@ -12962,7 +12962,7 @@ fn analyzeTupleMul( types[i] = 
operand_ty.structFieldType(i); values[i] = operand_ty.structFieldDefaultValue(i); const operand_src = lhs_src; // TODO better source location - if (values[i].tag() == .unreachable_value) { + if (values[i].ip_index == .unreachable_value) { runtime_src = operand_src; } } @@ -14332,7 +14332,7 @@ fn zirOverflowArithmetic( var result: struct { inst: Air.Inst.Ref = .none, - wrapped: Value = Value.initTag(.unreachable_value), + wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { switch (zir_tag) { @@ -14508,8 +14508,8 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { types[0] = ty; types[1] = ov_ty; - values[0] = Value.initTag(.unreachable_value); - values[1] = Value.initTag(.unreachable_value); + values[0] = Value.@"unreachable"; + values[1] = Value.@"unreachable"; return tuple_ty; } @@ -15647,7 +15647,7 @@ fn zirClosureCapture( // value only. In such case we preserve the type and use a dummy runtime value. const operand = try sema.resolveInst(inst_data.operand); const val = (try sema.resolveMaybeUndefValAllowVariables(operand)) orelse - Value.initTag(.unreachable_value); + Value.@"unreachable"; try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{ .ty = try sema.typeOf(operand).copy(sema.perm_arena), @@ -15684,7 +15684,7 @@ fn zirClosureGet( scope = scope.parent.?; }; - if (tv.val.tag() == .unreachable_value and !block.is_typeof and sema.func == null) { + if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(); @@ -15712,7 +15712,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.tag() == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { + if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(); @@ -15742,7 +15742,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.tag() == .unreachable_value) { + if (tv.val.ip_index == .unreachable_value) { assert(block.is_typeof); // We need a dummy runtime instruction with the correct type. 
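// Aside: in overflowArithmeticTupleType above, both tuple fields get
// Value.@"unreachable" as their comptime value, the sentinel that now spells
// "runtime-only field". For example, @addWithOverflow on a u32 produces the
// tuple shape { u32, u1 } with neither field comptime-known (the u1 overflow
// bit is the documented language behavior; ov_ty itself is computed outside
// this hunk).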
return block.addTy(.alloc, tv.ty); @@ -16477,7 +16477,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const struct_field_fields = try fields_anon_decl.arena().create([5]Value); const field_val = tuple.values[i]; - const is_comptime = field_val.tag() != .unreachable_value; + const is_comptime = field_val.ip_index != .unreachable_value; const opt_default_val = if (is_comptime) field_val else null; const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val); struct_field_fields.* = .{ @@ -16518,7 +16518,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const struct_field_fields = try fields_anon_decl.arena().create([5]Value); - const opt_default_val = if (field.default_val.tag() == .unreachable_value) + const opt_default_val = if (field.default_val.ip_index == .unreachable_value) null else field.default_val; @@ -16570,7 +16570,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty); break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); } else { - break :blk Value.initTag(.null_value); + break :blk Value.null; } }; @@ -17974,7 +17974,7 @@ fn finishStructInit( for (struct_obj.values, 0..) |default_val, i| { if (field_inits[i] != .none) continue; - if (default_val.tag() == .unreachable_value) { + if (default_val.ip_index == .unreachable_value) { const field_name = struct_obj.names[i]; const template = "missing struct field: {s}"; const args = .{field_name}; @@ -17994,7 +17994,7 @@ fn finishStructInit( if (field_inits[i] != .none) continue; const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { + if (default_val.ip_index == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -18010,7 +18010,7 @@ fn finishStructInit( for (struct_obj.fields.values(), 0..) 
|field, i| { if (field_inits[i] != .none) continue; - if (field.default_val.tag() == .unreachable_value) { + if (field.default_val.ip_index == .unreachable_value) { const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; const args = .{field_name}; @@ -18145,7 +18145,7 @@ fn zirStructInitAnon( if (try sema.resolveMaybeUndefVal(init)) |init_val| { values[i] = init_val; } else { - values[i] = Value.initTag(.unreachable_value); + values[i] = Value.@"unreachable"; runtime_index = i; } } @@ -18191,7 +18191,7 @@ fn zirStructInitAnon( .@"addrspace" = target_util.defaultAddressSpace(target, .local), .pointee_type = field_ty, }); - if (values[i].tag() == .unreachable_value) { + if (values[i].ip_index == .unreachable_value) { const init = try sema.resolveInst(item.data.init); const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, init); @@ -18357,7 +18357,7 @@ fn zirArrayInitAnon( if (try sema.resolveMaybeUndefVal(elem)) |val| { values[i] = val; } else { - values[i] = Value.initTag(.unreachable_value); + values[i] = Value.@"unreachable"; runtime_src = operand_src; } } @@ -18390,7 +18390,7 @@ fn zirArrayInitAnon( .@"addrspace" = target_util.defaultAddressSpace(target, .local), .pointee_type = types[i], }); - if (values[i].tag() == .unreachable_value) { + if (values[i].ip_index == .unreachable_value) { const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand)); } @@ -19545,8 +19545,8 @@ fn reifyStruct( else opt_val; break :blk try payload_val.copy(new_decl_arena_allocator); - } else Value.initTag(.unreachable_value); - if (is_comptime_val.toBool(mod) and default_val.tag() == .unreachable_value) { + } else Value.@"unreachable"; + if (is_comptime_val.toBool(mod) and default_val.ip_index == .unreachable_value) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } @@ -22579,7 +22579,7 @@ fn zirVarExtended( break :blk (try sema.resolveMaybeUndefVal(init)) orelse return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known"); - } else Value.initTag(.unreachable_value); + } else Value.@"unreachable"; try sema.validateVarType(block, ty_src, var_ty, small.is_extern); @@ -23080,7 +23080,7 @@ fn zirBuiltinExtern( const new_var = try new_decl_arena_allocator.create(Module.Var); new_var.* = .{ .owner_decl = sema.owner_decl_index, - .init = Value.initTag(.unreachable_value), + .init = Value.@"unreachable", .is_extern = true, .is_mutable = false, .is_threadlocal = options.is_thread_local, @@ -25736,7 +25736,7 @@ fn coerceExtra( } } else { in_memory_result = .{ .ptr_sentinel = .{ - .actual = Value.initTag(.unreachable_value), + .actual = Value.@"unreachable", .wanted = dest_sent, .ty = dst_elem_type, } }; @@ -26116,26 +26116,28 @@ fn coerceExtra( .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) { .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { - switch (inst_val.tag()) { + switch (inst_val.ip_index) { .undef => return sema.addConstUndef(dest_ty), - .eu_payload => { - const payload = try sema.addConstant( - inst_ty.errorUnionPayload(), - inst_val.castTag(.eu_payload).?.data, - ); - return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) { - error.NotCoercible => break :eu, - else => |e| return e, - }; - }, - else => { - const error_set = try sema.addConstant( - inst_ty.errorUnionSet(), - inst_val, - ); - 
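// Aside: zirStructInitAnon and zirArrayInitAnon above use the same sentinel
// in the other direction. A field whose operand is not comptime-known gets
// Value.@"unreachable" recorded as its "value", and runtime_index or
// runtime_src remembers which field forced the aggregate to be materialized
// at runtime; the later `values[i].ip_index == .unreachable_value` checks are
// the readers of that marker.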
return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src); + .none => switch (inst_val.tag()) { + .eu_payload => { + const payload = try sema.addConstant( + inst_ty.errorUnionPayload(), + inst_val.castTag(.eu_payload).?.data, + ); + return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) { + error.NotCoercible => break :eu, + else => |e| return e, + }; + }, + else => {}, }, + else => {}, } + const error_set = try sema.addConstant( + inst_ty.errorUnionSet(), + inst_val, + ); + return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src); } }, .ErrorSet => { @@ -26413,7 +26415,7 @@ const InMemoryCoercionResult = union(enum) { break; }, .array_sentinel => |sentinel| { - if (sentinel.actual.tag() != .unreachable_value) { + if (sentinel.actual.ip_index != .unreachable_value) { try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{ sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), }); @@ -26539,7 +26541,7 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_sentinel => |sentinel| { - if (sentinel.actual.tag() != .unreachable_value) { + if (sentinel.actual.ip_index != .unreachable_value) { try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{ sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), }); @@ -26747,8 +26749,8 @@ fn coerceInMemoryAllowed( dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, mod)); if (!ok_sent) { return InMemoryCoercionResult{ .array_sentinel = .{ - .actual = src_info.sentinel orelse Value.initTag(.unreachable_value), - .wanted = dest_info.sentinel orelse Value.initTag(.unreachable_value), + .actual = src_info.sentinel orelse Value.@"unreachable", + .wanted = dest_info.sentinel orelse Value.@"unreachable", .ty = dest_info.elem_type, } }; } @@ -27129,8 +27131,8 @@ fn coerceInMemoryAllowedPtrs( dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type, sema.mod)); if (!ok_sent) { return InMemoryCoercionResult{ .ptr_sentinel = .{ - .actual = src_info.sentinel orelse Value.initTag(.unreachable_value), - .wanted = dest_info.sentinel orelse Value.initTag(.unreachable_value), + .actual = src_info.sentinel orelse Value.@"unreachable", + .wanted = dest_info.sentinel orelse Value.@"unreachable", .ty = dest_info.pointee_type, } }; } @@ -27540,7 +27542,7 @@ fn beginComptimePtrMutation( }; } - switch (val_ptr.tag()) { + switch (val_ptr.ip_index) { .undef => { // An array has been initialized to undefined at comptime and now we // are for the first time setting an element. We must change the representation @@ -27565,127 +27567,130 @@ fn beginComptimePtrMutation( parent.decl_ref_mut, ); }, - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. 
This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try Value.Tag.int_u64.create(arena, bytes[i]); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + .none => switch (val_ptr.tag()) { + .bytes => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `bytes` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const bytes = val_ptr.castTag(.bytes).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + // bytes.len may be one greater than dest_len because of the case when + // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. + assert(bytes.len >= dest_len); + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (elems, 0..) |*elem, i| { + elem.* = try Value.Tag.int_u64.create(arena, bytes[i]); + } - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .str_lit => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `str_lit` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (bytes, 0..) |byte, i| { - elems[i] = try Value.Tag.int_u64.create(arena, byte); - } - if (parent.ty.sentinel(mod)) |sent_val| { - assert(elems.len == bytes.len + 1); - elems[bytes.len] = sent_val; - } + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .str_lit => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `str_lit` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const str_lit = val_ptr.castTag(.str_lit).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (bytes, 0..) 
|byte, i| { + elems[i] = try Value.Tag.int_u64.create(arena, byte); + } + if (parent.ty.sentinel(mod)) |sent_val| { + assert(elems.len == bytes.len + 1); + elems[bytes.len] = sent_val; + } - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .repeated => { + // An array is memory-optimized to store only a single element value, and + // that value is understood to be the same for the entire length of the array. + // However, now we want to modify an individual field and so the + // representation has to change. If we wanted to avoid this, there would + // need to be special detection elsewhere to identify when writing a value to an + // array element that is stored using the `repeated` tag, and handle it + // without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + if (elems.len > 0) elems[0] = repeated_val; + for (elems[1..]) |*elem| { + elem.* = try repeated_val.copy(arena); + } - const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - if (elems.len > 0) elems[0] = repeated_val; - for (elems[1..]) |*elem| { - elem.* = try repeated_val.copy(arena); - } + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, - return beginComptimePtrMutationInner( + .aggregate => return beginComptimePtrMutationInner( sema, block, src, elem_ty, - &elems[elem_ptr.index], + &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], ptr_elem_ty, parent.decl_ref_mut, - ); - }, + ), - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ), + .the_only_possible_value => { + const duped = try sema.arena.create(Value); + duped.* = Value.initTag(.the_only_possible_value); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + duped, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, - 
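// Aside: all three "de-optimizations" above (bytes, str_lit, repeated) share
// one move: expand the compact storage into an explicit element array, then
// hand out a pointer to one slot. In miniature, quoting the patch's own
// steps:
//
//     const elems = try arena.alloc(Value, dest_len);
//     for (elems, 0..) |*elem, i| elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
//     val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
//     // ...then recurse into &elems[elem_ptr.index] as a plain aggregate.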
.the_only_possible_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); + else => unreachable, }, - else => unreachable, } }, @@ -27738,7 +27743,7 @@ fn beginComptimePtrMutation( var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); switch (parent.pointee) { - .direct => |val_ptr| switch (val_ptr.tag()) { + .direct => |val_ptr| switch (val_ptr.ip_index) { .undef => { // A struct or union has been initialized to undefined at comptime and now we // are for the first time setting a field. We must change the representation @@ -27815,72 +27820,75 @@ fn beginComptimePtrMutation( else => unreachable, } }, - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - - .@"union" => { - // We need to set the active field of the union. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try Value.Tag.enum_field_index.create(arena, field_index); - - return beginComptimePtrMutationInner( + .none => switch (val_ptr.tag()) { + .aggregate => return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index), - &payload.val, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .slice => switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), - &val_ptr.castTag(.slice).?.data.ptr, + &val_ptr.castTag(.aggregate).?.data[field_index], ptr_elem_ty, parent.decl_ref_mut, ), - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), + .@"union" => { + // We need to set the active field of the union. 
+ const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - else => unreachable, - }, + const payload = &val_ptr.castTag(.@"union").?.data; + payload.tag = try Value.Tag.enum_field_index.create(arena, field_index); - .empty_struct_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index), + &payload.val, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .slice => switch (field_index) { + Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.decl_ref_mut, + ), + + Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.decl_ref_mut, + ), + + else => unreachable, + }, + + .empty_struct_value => { + const duped = try sema.arena.create(Value); + duped.* = Value.initTag(.the_only_possible_value); + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index), + duped, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + else => unreachable, + }, else => unreachable, }, .reinterpret => |reinterpret| { @@ -27951,7 +27959,7 @@ fn beginComptimePtrMutation( switch (parent.pointee) { .direct => |val_ptr| { const payload_ty = parent.ty.optionalChild(mod); - switch (val_ptr.tag()) { + switch (val_ptr.ip_index) { .undef, .null_value => { // An optional has been initialized to undefined at comptime and now we // are for the first time setting the payload. 
We must change the @@ -27973,12 +27981,19 @@ fn beginComptimePtrMutation( .ty = payload_ty, }; }, - .opt_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, + .none => switch (val_ptr.tag()) { + .opt_payload => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, + else => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, + }, + }, else => return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .pointee = .{ .direct = val_ptr }, @@ -28092,231 +28107,236 @@ fn beginComptimePtrLoad( ) ComptimePtrLoadError!ComptimePtrLoadKit { const mod = sema.mod; const target = sema.mod.getTarget(); - var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - => blk: { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }; - const is_mutable = ptr_val.tag() == .decl_ref_mut; - const decl = sema.mod.declPtr(decl_index); - const decl_tv = try decl.typedValue(); - if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; - - const layout_defined = decl.ty.hasWellDefinedLayout(mod); - break :blk ComptimePtrLoadKit{ - .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, - .pointee = decl_tv, - .is_mutable = is_mutable, - .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, - }; + + var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) { + .null_value => { + return sema.fail(block, src, "attempt to use null value", .{}); }, - .elem_ptr => blk: { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ty = elem_ptr.elem_ty; - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null); + .none => switch (ptr_val.tag()) { + .decl_ref, + .decl_ref_mut, + => blk: { + const decl_index = switch (ptr_val.tag()) { + .decl_ref => ptr_val.castTag(.decl_ref).?.data, + .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, + else => unreachable, + }; + const is_mutable = ptr_val.tag() == .decl_ref_mut; + const decl = sema.mod.declPtr(decl_index); + const decl_tv = try decl.typedValue(); + if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; + + const layout_defined = decl.ty.hasWellDefinedLayout(mod); + break :blk ComptimePtrLoadKit{ + .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, + .pointee = decl_tv, + .is_mutable = is_mutable, + .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, + }; + }, - // This code assumes that elem_ptrs have been "flattened" in order for direct dereference - // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that - // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { - assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod))); - } + .elem_ptr => blk: { + const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; + const elem_ty = elem_ptr.elem_ty; + var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null); + + // This code assumes that elem_ptrs have been "flattened" in order for direct dereference + // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that + // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" + if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { + assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod))); + } + + if (elem_ptr.index != 0) { + if (elem_ty.hasWellDefinedLayout(mod)) { + if (deref.parent) |*parent| { + // Update the byte offset (in-place) + const elem_size = try sema.typeAbiSize(elem_ty); + const offset = parent.byte_offset + elem_size * elem_ptr.index; + parent.byte_offset = try sema.usizeCast(block, src, offset); + } + } else { + deref.parent = null; + deref.ty_without_well_defined_layout = elem_ty; + } + } + + // If we're loading an elem_ptr that was derived from a different type + // than the true type of the underlying decl, we cannot deref directly + const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { + const deref_elem_ty = deref.pointee.?.ty.childType(mod); + break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; + } else false; + if (!ty_matches) { + deref.pointee = null; + break :blk deref; + } + + var array_tv = deref.pointee.?; + const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); + if (maybe_array_ty) |load_ty| { + // It's possible that we're loading a [N]T, in which case we'd like to slice + // the pointee array directly from our parent array. 
+ if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) { + const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); + deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ + .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), + .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N), + } else null; + break :blk deref; + } + } + + if (elem_ptr.index >= check_len) { + deref.pointee = null; + break :blk deref; + } + if (elem_ptr.index == check_len - 1) { + if (array_tv.ty.sentinel(mod)) |sent| { + deref.pointee = TypedValue{ + .ty = elem_ty, + .val = sent, + }; + break :blk deref; + } + } + deref.pointee = TypedValue{ + .ty = elem_ty, + .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index), + }; + break :blk deref; + }, - if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout(mod)) { - if (deref.parent) |*parent| { + .slice => blk: { + const slice = ptr_val.castTag(.slice).?.data; + break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null); + }, + + .field_ptr => blk: { + const field_ptr = ptr_val.castTag(.field_ptr).?.data; + const field_index = @intCast(u32, field_ptr.field_index); + var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); + + if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { + const struct_ty = field_ptr.container_ty.castTag(.@"struct"); + if (struct_ty != null and struct_ty.?.data.layout == .Packed) { + // packed structs are not byte addressable + deref.parent = null; + } else if (deref.parent) |*parent| { // Update the byte offset (in-place) - const elem_size = try sema.typeAbiSize(elem_ty); - const offset = parent.byte_offset + elem_size * elem_ptr.index; - parent.byte_offset = try sema.usizeCast(block, src, offset); + try sema.resolveTypeLayout(field_ptr.container_ty); + const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod); + parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { deref.parent = null; - deref.ty_without_well_defined_layout = elem_ty; + deref.ty_without_well_defined_layout = field_ptr.container_ty; } - } - // If we're loading an elem_ptr that was derived from a different type - // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { - const deref_elem_ty = deref.pointee.?.ty.childType(mod); - break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; - } else false; - if (!ty_matches) { - deref.pointee = null; - break :blk deref; - } - - var array_tv = deref.pointee.?; - const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); - if (maybe_array_ty) |load_ty| { - // It's possible that we're loading a [N]T, in which case we'd like to slice - // the pointee array directly from our parent array. 
- if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) { - const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); - deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ - .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), - .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N), - } else null; + const tv = deref.pointee orelse { + deref.pointee = null; + break :blk deref; + }; + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; + if (!coerce_in_mem_ok) { + deref.pointee = null; break :blk deref; } - } - if (elem_ptr.index >= check_len) { - deref.pointee = null; - break :blk deref; - } - if (elem_ptr.index == check_len - 1) { - if (array_tv.ty.sentinel(mod)) |sent| { + if (field_ptr.container_ty.isSlice(mod)) { + const slice_val = tv.val.castTag(.slice).?.data; + deref.pointee = switch (field_index) { + Value.Payload.Slice.ptr_index => TypedValue{ + .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), + .val = slice_val.ptr, + }, + Value.Payload.Slice.len_index => TypedValue{ + .ty = Type.usize, + .val = slice_val.len, + }, + else => unreachable, + }; + } else { + const field_ty = field_ptr.container_ty.structFieldType(field_index); deref.pointee = TypedValue{ - .ty = elem_ty, - .val = sent, + .ty = field_ty, + .val = tv.val.fieldValue(tv.ty, mod, field_index), }; - break :blk deref; } - } - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index), - }; - break :blk deref; - }, + break :blk deref; + }, - .slice => blk: { - const slice = ptr_val.castTag(.slice).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null); - }, + .comptime_field_ptr => blk: { + const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data; + break :blk ComptimePtrLoadKit{ + .parent = null, + .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val }, + .is_mutable = false, + .ty_without_well_defined_layout = comptime_field_ptr.field_ty, + }; + }, - .field_ptr => blk: { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); + .opt_payload_ptr, + .eu_payload_ptr, + => blk: { + const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; + const payload_ty = switch (ptr_val.tag()) { + .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(), + .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod), + else => unreachable, + }; + var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); - if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { - const struct_ty = field_ptr.container_ty.castTag(.@"struct"); - if (struct_ty != null and struct_ty.?.data.layout == .Packed) { - // packed structs are not byte addressable + // eu_payload_ptr and opt_payload_ptr never have a well-defined layout + if (deref.parent != null) { deref.parent = null; - } else if (deref.parent) |*parent| { - // Update the byte offset (in-place) - try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = 
field_ptr.container_ty.structFieldOffset(field_index, mod); - parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); + deref.ty_without_well_defined_layout = payload_ptr.container_ty; } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = field_ptr.container_ty; - } - const tv = deref.pointee orelse { - deref.pointee = null; - break :blk deref; - }; - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; - if (!coerce_in_mem_ok) { + if (deref.pointee) |*tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + const payload_val = switch (ptr_val.tag()) { + .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { + return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); + }, + .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { + if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); + break :opt tv.val; + }, + else => unreachable, + }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; + break :blk deref; + } + } deref.pointee = null; break :blk deref; - } - - if (field_ptr.container_ty.isSlice(mod)) { - const slice_val = tv.val.castTag(.slice).?.data; - deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), - .val = slice_val.ptr, - }, - Value.Payload.Slice.len_index => TypedValue{ - .ty = Type.usize, - .val = slice_val.len, - }, - else => unreachable, - }; - } else { - const field_ty = field_ptr.container_ty.structFieldType(field_index); - deref.pointee = TypedValue{ - .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, mod, field_index), - }; - } - break :blk deref; - }, - - .comptime_field_ptr => blk: { - const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data; - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val }, - .is_mutable = false, - .ty_without_well_defined_layout = comptime_field_ptr.field_ty, - }; - }, - - .opt_payload_ptr, - .eu_payload_ptr, - => blk: { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const payload_ty = switch (ptr_val.tag()) { - .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(), - .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod), - else => unreachable, - }; - var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); + }, + .opt_payload => blk: { + const opt_payload = ptr_val.castTag(.opt_payload).?.data; + break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null); + }, - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = payload_ptr.container_ty; - } + .zero, + .one, + .int_u64, + .int_i64, + .int_big_positive, + .int_big_negative, + .variable, + .extern_fn, + .function, + => return error.RuntimeLoad, - if 
(deref.pointee) |*tv| { - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); - }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; - }, - else => unreachable, - }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; - } - } - deref.pointee = null; - break :blk deref; - }, - .null_value => { - return sema.fail(block, src, "attempt to use null value", .{}); - }, - .opt_payload => blk: { - const opt_payload = ptr_val.castTag(.opt_payload).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null); + else => unreachable, }, - - .zero, - .one, - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, - .variable, - .extern_fn, - .function, - => return error.RuntimeLoad, - else => unreachable, }; @@ -28953,7 +28973,7 @@ fn coerceTupleToStruct( const field_name = fields.keys()[i]; const field = fields.values()[i]; const field_src = inst_src; // TODO better source location - if (field.default_val.tag() == .unreachable_value) { + if (field.default_val.ip_index == .unreachable_value) { const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { @@ -29023,7 +29043,7 @@ fn coerceTupleToTuple( const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); field_refs[field_index] = coerced; - if (default_val.tag() != .unreachable_value) { + if (default_val.ip_index != .unreachable_value) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; @@ -29052,7 +29072,7 @@ fn coerceTupleToTuple( const field_ty = tuple_ty.structFieldType(i); const field_src = inst_src; // TODO better source location - if (default_val.tag() == .unreachable_value) { + if (default_val.ip_index == .unreachable_value) { if (tuple_ty.isTuple()) { const template = "missing tuple field: {d}"; if (root_msg) |msg| { @@ -31557,7 +31577,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) 
|field_ty, i| { - const have_comptime_val = tuple.values[i].tag() != .unreachable_value; + const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) { return true; } @@ -32141,7 +32161,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void gop.value_ptr.* = .{ .ty = Type.noreturn, .abi_align = 0, - .default_val = Value.initTag(.unreachable_value), + .default_val = Value.@"unreachable", .is_comptime = is_comptime, .offset = undefined, }; @@ -32965,7 +32985,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { => return null, .void => return Value.void, - .noreturn => return Value.initTag(.unreachable_value), + .noreturn => return Value.@"unreachable", .null => return Value.null, .undefined => return Value.undef, @@ -33027,7 +33047,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.values, 0..) |val, i| { - const is_comptime = val.tag() != .unreachable_value; + const is_comptime = val.ip_index != .unreachable_value; if (is_comptime) continue; if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue; return null; @@ -33059,7 +33079,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } switch (enum_obj.fields.count()) { - 0 => return Value.initTag(.unreachable_value), + 0 => return Value.@"unreachable", 1 => if (enum_obj.values.count() == 0) { return Value.zero; // auto-numbered } else { @@ -33072,7 +33092,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const resolved_ty = try sema.resolveTypeFields(ty); const enum_simple = resolved_ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { - 0 => return Value.initTag(.unreachable_value), + 0 => return Value.@"unreachable", 1 => return Value.zero, else => return null, } @@ -33091,7 +33111,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse return null; const fields = union_obj.fields.values(); - if (fields.len == 0) return Value.initTag(.unreachable_value); + if (fields.len == 0) return Value.@"unreachable"; const only_field = fields[0]; if (only_field.ty.eql(resolved_ty, sema.mod)) { const msg = try Module.ErrorMsg.create( @@ -33600,7 +33620,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) 
|field_ty, i| { - const have_comptime_val = tuple.values[i].tag() != .unreachable_value; + const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) { return true; } @@ -33814,7 +33834,7 @@ fn numberAddWrapScalar( rhs: Value, ty: Type, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const mod = sema.mod; if (ty.zigTypeTag(mod) == .ComptimeInt) { @@ -33874,7 +33894,7 @@ fn numberSubWrapScalar( rhs: Value, ty: Type, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const mod = sema.mod; if (ty.zigTypeTag(mod) == .ComptimeInt) { @@ -34156,12 +34176,16 @@ fn intFitsInType( ) CompileError!bool { const mod = sema.mod; const target = mod.getTarget(); - switch (val.tag()) { - .zero, + switch (val.ip_index) { .undef, + .zero, + .zero_usize, + .zero_u8, => return true, - .one => switch (ty.zigTypeTag(mod)) { + .one, + .one_usize, + => switch (ty.zigTypeTag(mod)) { .Int => { const info = ty.intInfo(mod); return switch (info.signedness) { @@ -34173,111 +34197,129 @@ fn intFitsInType( else => unreachable, }, - .lazy_align => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); - // If it is u16 or bigger we know the alignment fits without resolving it. - if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; + .none => switch (val.tag()) { + .zero => return true, + + .one => switch (ty.zigTypeTag(mod)) { + .Int => { + const info = ty.intInfo(mod); + return switch (info.signedness) { + .signed => info.bits >= 2, + .unsigned => info.bits >= 1, + }; + }, + .ComptimeInt => return true, + else => unreachable, }, - .ComptimeInt => return true, - else => unreachable, - }, - .lazy_size => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); - // If it is u64 or bigger we know the size fits without resolving it. - if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; + + .lazy_align => switch (ty.zigTypeTag(mod)) { + .Int => { + const info = ty.intInfo(mod); + const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); + // If it is u16 or bigger we know the alignment fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, + .ComptimeInt => return true, + else => unreachable, + }, + .lazy_size => switch (ty.zigTypeTag(mod)) { + .Int => { + const info = ty.intInfo(mod); + const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); + // If it is u64 or bigger we know the size fits without resolving it. 
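+                        // Otherwise resolve the ABI size and count the bits it
+                        // actually needs: e.g. a size of 24 needs log2(24) + 1 = 5
+                        // value bits, so it fits in a u5 but not in an i5 (the
+                        // sign bit costs one more).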
+ if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, + .ComptimeInt => return true, + else => unreachable, }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_u64 => switch (ty.zigTypeTag(mod)) { - .Int => { - const x = val.castTag(.int_u64).?.data; - if (x == 0) return true; - const info = ty.intInfo(mod); - const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= needed_bits; + .int_u64 => switch (ty.zigTypeTag(mod)) { + .Int => { + const x = val.castTag(.int_u64).?.data; + if (x == 0) return true; + const info = ty.intInfo(mod); + const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= needed_bits; + }, + .ComptimeInt => return true, + else => unreachable, }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_i64 => switch (ty.zigTypeTag(mod)) { - .Int => { - const x = val.castTag(.int_i64).?.data; - if (x == 0) return true; - const info = ty.intInfo(mod); - if (info.signedness == .unsigned and x < 0) - return false; - var buffer: Value.BigIntSpace = undefined; - return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits); + .int_i64 => switch (ty.zigTypeTag(mod)) { + .Int => { + const x = val.castTag(.int_i64).?.data; + if (x == 0) return true; + const info = ty.intInfo(mod); + if (info.signedness == .unsigned and x < 0) + return false; + var buffer: Value.BigIntSpace = undefined; + return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits); + }, + .ComptimeInt => return true, + else => unreachable, }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_big_positive => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); + .int_big_positive => switch (ty.zigTypeTag(mod)) { + .Int => { + const info = ty.intInfo(mod); + return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); + }, + .ComptimeInt => return true, + else => unreachable, }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_big_negative => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); + .int_big_negative => switch (ty.zigTypeTag(mod)) { + .Int => { + const info = ty.intInfo(mod); + return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); + }, + .ComptimeInt => return true, + else => unreachable, }, - .ComptimeInt => return true, - else => unreachable, - }, - .the_only_possible_value => { - assert(ty.intInfo(mod).bits == 0); - return true; - }, + .the_only_possible_value => { + assert(ty.intInfo(mod).bits == 0); + return true; + }, - .decl_ref_mut, - .extern_fn, - .decl_ref, - .function, - .variable, - => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const ptr_bits = target.ptrBitWidth(); - return switch (info.signedness) { - .signed => info.bits > ptr_bits, - .unsigned => info.bits >= ptr_bits, - }; + .decl_ref_mut, + .extern_fn, + .decl_ref, + .function, + .variable, + => switch (ty.zigTypeTag(mod)) { + .Int => { + const info = 
ty.intInfo(mod); + const ptr_bits = target.ptrBitWidth(); + return switch (info.signedness) { + .signed => info.bits > ptr_bits, + .unsigned => info.bits >= ptr_bits, + }; + }, + .ComptimeInt => return true, + else => unreachable, }, - .ComptimeInt => return true, - else => unreachable, - }, - .aggregate => { - assert(ty.zigTypeTag(mod) == .Vector); - for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { - if (vector_index) |some| some.* = i; - return false; + .aggregate => { + assert(ty.zigTypeTag(mod) == .Vector); + for (val.castTag(.aggregate).?.data, 0..) |elem, i| { + if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { + if (vector_index) |some| some.* = i; + return false; + } } - } - return true; + return true; + }, + + else => unreachable, }, - else => unreachable, + else => @panic("TODO"), } } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 7f599caafbec..0efd39637390 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -76,34 +76,236 @@ pub fn print( if (val.isVariable(mod)) return writer.writeAll("(variable)"); - while (true) switch (val.tag()) { - .empty_struct_value, .aggregate => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - if (ty.zigTypeTag(mod) == .Struct) { - try writer.writeAll(".{"); - const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); + while (true) switch (val.ip_index) { + .none => switch (val.tag()) { + .empty_struct_value, .aggregate => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + if (ty.zigTypeTag(mod) == .Struct) { + try writer.writeAll(".{"); + const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - switch (ty.tag()) { - .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), - else => {}, + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + switch (ty.tag()) { + .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), + else => {}, + } + try print(.{ + .ty = ty.structFieldType(i), + .val = val.fieldValue(ty, mod, i), + }, writer, level - 1, mod); } + if (ty.structFieldCount() > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll("}"); + } else { + const elem_ty = ty.elemType2(mod); + const len = ty.arrayLen(mod); + + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @intCast(usize, std.math.min(len, max_string_len)); + var buf: [max_string_len]u8 = undefined; + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + const elem = val.fieldValue(ty, mod, i); + if (elem.isUndef()) break :str; + buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; + } + + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + + try writer.writeAll(".{ "); + + const max_len = std.math.min(len, max_aggregate_items); + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(.{ + .ty = elem_ty, + .val = val.fieldValue(ty, mod, i), + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + } + }, + .@"union" => { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + const union_val = val.castTag(.@"union").?.data; + try writer.writeAll(".{ "); + + try print(.{ + .ty = ty.cast(Type.Payload.Union).?.data.tag_ty, + .val = union_val.tag, + }, writer, level - 1, mod); + try writer.writeAll(" = "); + try print(.{ + .ty = ty.unionFieldType(union_val.tag, mod), + .val = union_val.val, + }, writer, level - 1, mod); + + return writer.writeAll(" }"); + }, + .zero => return writer.writeAll("0"), + .one => return writer.writeAll("1"), + .the_only_possible_value => return writer.writeAll("0"), + .ty => return val.castTag(.ty).?.data.print(writer, mod), + .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), + .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), + .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), + .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), + .lazy_align => { + const sub_ty = val.castTag(.lazy_align).?.data; + const x = sub_ty.abiAlignment(mod); + return writer.print("{d}", .{x}); + }, + .lazy_size => { + const sub_ty = val.castTag(.lazy_size).?.data; + const x = sub_ty.abiSize(mod); + return writer.print("{d}", .{x}); + }, + .function => return writer.print("(function '{s}')", .{ + mod.declPtr(val.castTag(.function).?.data.owner_decl).name, + }), + .extern_fn => return writer.writeAll("(extern function)"), + .variable => unreachable, + .decl_ref_mut => { + const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; + const decl = mod.declPtr(decl_index); + if (level == 0) { + return writer.print("(decl ref mut '{s}')", .{decl.name}); + } + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .decl_ref => { + const decl_index = val.castTag(.decl_ref).?.data; + const decl = mod.declPtr(decl_index); + if (level == 0) { + return writer.print("(decl ref '{s}')", .{decl.name}); + } + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .comptime_field_ptr => { + const payload = val.castTag(.comptime_field_ptr).?.data; + if (level == 0) { + return writer.writeAll("(comptime field ptr)"); + } + return print(.{ + .ty = payload.field_ty, + .val = payload.field_val, + }, writer, level - 1, mod); + }, + .elem_ptr => { + const elem_ptr = val.castTag(.elem_ptr).?.data; + try writer.writeAll("&"); + if (level == 0) { + try writer.writeAll("(ptr)"); + } else { try print(.{ - .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, mod, i), + .ty = elem_ptr.elem_ty, + .val = elem_ptr.array_ptr, }, writer, level - 1, mod); } - if (ty.structFieldCount() > max_aggregate_items) { + return writer.print("[{}]", .{elem_ptr.index}); + }, + .field_ptr => { + const field_ptr = val.castTag(.field_ptr).?.data; + try writer.writeAll("&"); + if (level == 0) { + try writer.writeAll("(ptr)"); + } else { + try print(.{ + .ty = field_ptr.container_ty, + .val = field_ptr.container_ptr, + }, writer, level - 1, mod); + } + + if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { + switch (field_ptr.container_ty.tag()) { + .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), + else => { + const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index); + return writer.print(".{s}", .{field_name}); + }, + } + } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { + const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; + return writer.print(".{s}", 
.{field_name}); + } else if (field_ptr.container_ty.isSlice(mod)) { + switch (field_ptr.field_index) { + Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), + Value.Payload.Slice.len_index => return writer.writeAll(".len"), + else => unreachable, + } + } + }, + .empty_array => return writer.writeAll(".{}"), + .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), + .enum_field_index => { + return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)}); + }, + .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), + .str_lit => { + const str_lit = val.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); + }, + .repeated => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + var i: u32 = 0; + try writer.writeAll(".{ "); + const elem_tv = TypedValue{ + .ty = ty.elemType2(mod), + .val = val.castTag(.repeated).?.data, + }; + const len = ty.arrayLen(mod); + const max_len = std.math.min(len, max_aggregate_items); + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(elem_tv, writer, level - 1, mod); + } + if (len > max_aggregate_items) { try writer.writeAll(", ..."); } - return writer.writeAll("}"); - } else { + return writer.writeAll(" }"); + }, + .empty_array_sentinel => { + if (level == 0) { + return writer.writeAll(".{ (sentinel) }"); + } + try writer.writeAll(".{ "); + try print(.{ + .ty = ty.elemType2(mod), + .val = ty.sentinel(mod).?, + }, writer, level - 1, mod); + return writer.writeAll(" }"); + }, + .slice => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + const payload = val.castTag(.slice).?.data; const elem_ty = ty.elemType2(mod); - const len = ty.arrayLen(mod); + const len = payload.len.toUnsignedInt(mod); if (elem_ty.eql(Type.u8, mod)) str: { const max_len = @intCast(usize, std.math.min(len, max_string_len)); @@ -111,11 +313,13 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, mod, i); - if (elem.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; + var elem_buf: Value.ElemValueBuffer = undefined; + const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); + if (elem_val.isUndef()) break :str; + buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } + // TODO would be nice if this had a bit of unicode awareness. const truncated = if (len > max_string_len) " (truncated)" else ""; return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); } @@ -126,292 +330,91 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); + var buf: Value.ElemValueBuffer = undefined; try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, mod, i), + .val = payload.ptr.elemValueBuffer(mod, i, &buf), }, writer, level - 1, mod); } if (len > max_aggregate_items) { try writer.writeAll(", ..."); } return writer.writeAll(" }"); - } - }, - .@"union" => { - if (level == 0) { - return writer.writeAll(".{ ... 
}"); - } - const union_val = val.castTag(.@"union").?.data; - try writer.writeAll(".{ "); - - try print(.{ - .ty = ty.cast(Type.Payload.Union).?.data.tag_ty, - .val = union_val.tag, - }, writer, level - 1, mod); - try writer.writeAll(" = "); - try print(.{ - .ty = ty.unionFieldType(union_val.tag, mod), - .val = union_val.val, - }, writer, level - 1, mod); - - return writer.writeAll(" }"); - }, - .null_value => return writer.writeAll("null"), - .undef => return writer.writeAll("undefined"), - .zero => return writer.writeAll("0"), - .one => return writer.writeAll("1"), - .unreachable_value => return writer.writeAll("unreachable"), - .the_only_possible_value => return writer.writeAll("0"), - .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), - .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), - .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), - .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .lazy_align => { - const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(mod); - return writer.print("{d}", .{x}); - }, - .lazy_size => { - const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(mod); - return writer.print("{d}", .{x}); - }, - .function => return writer.print("(function '{s}')", .{ - mod.declPtr(val.castTag(.function).?.data.owner_decl).name, - }), - .extern_fn => return writer.writeAll("(extern function)"), - .variable => unreachable, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref mut '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (level == 0) { - return writer.writeAll("(comptime field ptr)"); - } - return print(.{ - .ty = payload.field_ty, - .val = payload.field_val, - }, writer, level - 1, mod); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { + }, + .float_16 => return writer.print("{d}", .{val.castTag(.float_16).?.data}), + .float_32 => return writer.print("{d}", .{val.castTag(.float_32).?.data}), + .float_64 => return writer.print("{d}", .{val.castTag(.float_64).?.data}), + .float_80 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_80).?.data)}), + .float_128 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_128).?.data)}), + .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), + .eu_payload => { + val = val.castTag(.eu_payload).?.data; + ty = ty.errorUnionPayload(); + }, + .opt_payload => { + val = val.castTag(.opt_payload).?.data; + ty = ty.optionalChild(mod); + return print(.{ .ty = ty, .val = val }, writer, level, mod); + }, + .eu_payload_ptr => { + try writer.writeAll("&"); + + const data = val.castTag(.eu_payload_ptr).?.data; + + var ty_val: Value.Payload.Ty 
= .{ + .base = .{ .tag = .ty }, + .data = ty, + }; + + try writer.writeAll("@as("); try print(.{ - .ty = elem_ptr.elem_ty, - .val = elem_ptr.array_ptr, + .ty = Type.type, + .val = Value.initPayload(&ty_val.base), }, writer, level - 1, mod); - } - return writer.print("[{}]", .{elem_ptr.index}); - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { + + try writer.writeAll(", &(payload of "); + try print(.{ - .ty = field_ptr.container_ty, - .val = field_ptr.container_ptr, + .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), + .val = data.container_ptr, }, writer, level - 1, mod); - } - - if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { - switch (field_ptr.container_ty.tag()) { - .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), - else => { - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index); - return writer.print(".{s}", .{field_name}); - }, - } - } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { - const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; - return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.isSlice(mod)) { - switch (field_ptr.field_index) { - Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), - Value.Payload.Slice.len_index => return writer.writeAll(".len"), - else => unreachable, - } - } - }, - .empty_array => return writer.writeAll(".{}"), - .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => { - return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)}); - }, - .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); - }, - .repeated => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - var i: u32 = 0; - try writer.writeAll(".{ "); - const elem_tv = TypedValue{ - .ty = ty.elemType2(mod), - .val = val.castTag(.repeated).?.data, - }; - const len = ty.arrayLen(mod); - const max_len = std.math.min(len, max_aggregate_items); - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - try print(elem_tv, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .empty_array_sentinel => { - if (level == 0) { - return writer.writeAll(".{ (sentinel) }"); - } - try writer.writeAll(".{ "); - try print(.{ - .ty = ty.elemType2(mod), - .val = ty.sentinel(mod).?, - }, writer, level - 1, mod); - return writer.writeAll(" }"); - }, - .slice => { - if (level == 0) { - return writer.writeAll(".{ ... 
}"); - } - const payload = val.castTag(.slice).?.data; - const elem_ty = ty.elemType2(mod); - const len = payload.len.toUnsignedInt(mod); - - if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); - var buf: [max_string_len]u8 = undefined; - var i: u32 = 0; - while (i < max_len) : (i += 1) { - var elem_buf: Value.ElemValueBuffer = undefined; - const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); - if (elem_val.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; - } - - // TODO would be nice if this had a bit of unicode awareness. - const truncated = if (len > max_string_len) " (truncated)" else ""; - return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); - } + try writer.writeAll("))"); + return; + }, + .opt_payload_ptr => { + const data = val.castTag(.opt_payload_ptr).?.data; - try writer.writeAll(".{ "); + var ty_val: Value.Payload.Ty = .{ + .base = .{ .tag = .ty }, + .data = ty, + }; - const max_len = std.math.min(len, max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - var buf: Value.ElemValueBuffer = undefined; + try writer.writeAll("@as("); try print(.{ - .ty = elem_ty, - .val = payload.ptr.elemValueBuffer(mod, i, &buf), + .ty = Type.type, + .val = Value.initPayload(&ty_val.base), }, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .float_16 => return writer.print("{d}", .{val.castTag(.float_16).?.data}), - .float_32 => return writer.print("{d}", .{val.castTag(.float_32).?.data}), - .float_64 => return writer.print("{d}", .{val.castTag(.float_64).?.data}), - .float_80 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_80).?.data)}), - .float_128 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_128).?.data)}), - .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(); - }, - .opt_payload => { - val = val.castTag(.opt_payload).?.data; - ty = ty.optionalChild(mod); - return print(.{ .ty = ty, .val = val }, writer, level, mod); - }, - .eu_payload_ptr => { - try writer.writeAll("&"); - const data = val.castTag(.eu_payload_ptr).?.data; + try writer.writeAll(", &(payload of "); - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = Value.initPayload(&ty_val.base), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); + try print(.{ + .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), + .val = data.container_ptr, + }, writer, level - 1, mod); - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); + try writer.writeAll("))"); + return; + }, - try writer.writeAll("))"); - return; + // TODO these should not appear in this function + .inferred_alloc => return writer.writeAll("(inferred allocation value)"), + .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), + .runtime_value => return writer.writeAll("[runtime value]"), }, - .opt_payload_ptr => { - const data = val.castTag(.opt_payload_ptr).?.data; - - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - - 
try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = Value.initPayload(&ty_val.base), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); + else => { + try writer.print("(interned: {})", .{val.ip_index}); return; }, - - // TODO these should not appear in this function - .inferred_alloc => return writer.writeAll("(inferred allocation value)"), - .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), - .runtime_value => return writer.writeAll("[runtime value]"), }; } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 96304628e9b1..ea7134c603e9 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3088,11 +3088,15 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { 64 => return WValue{ .float64 = val.toFloat(f64) }, else => unreachable, }, - .Pointer => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, - .zero, .null_value => return WValue{ .imm32 = 0 }, - else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), + .Pointer => switch (val.ip_index) { + .null_value => return WValue{ .imm32 = 0 }, + .none => switch (val.tag()) { + .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), + .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + .zero => return WValue{ .imm32 = 0 }, + else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), + }, + else => unreachable, }, .Enum => { if (val.castTag(.enum_field_index)) |field_index| { diff --git a/src/codegen.zig b/src/codegen.zig index a80740050233..25e8d892d846 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -312,7 +312,7 @@ pub fn generateSymbol( ), }, }, - .Pointer => switch (typed_value.val.tag()) { + .Pointer => switch (typed_value.val.ip_index) { .null_value => { switch (target.ptrBitWidth()) { 32 => { @@ -327,76 +327,79 @@ pub fn generateSymbol( } return Result.ok; }, - .zero, .one, .int_u64, .int_big_positive => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => typed_value.val.castTag(.variable).?.data.owner_decl, - .decl_ref => typed_value.val.castTag(.decl_ref).?.data, - .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, + .none => switch (typed_value.val.tag()) { + .zero, .one, .int_u64, .int_big_positive => { + switch (target.ptrBitWidth()) { + 32 => { + const x = typed_value.val.toUnsignedInt(mod); + mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); + }, + 64 => { + const x = typed_value.val.toUnsignedInt(mod); + mem.writeInt(u64, try code.addManyAsArray(8), x, endian); + }, + else => unreachable, + } + return Result.ok; }, - code, - 
debug_output, - reloc_info, - ), - .slice => { - const slice = typed_value.val.castTag(.slice).?.data; + .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( + bin_file, + src_loc, + typed_value, + switch (tag) { + .variable => typed_value.val.castTag(.variable).?.data.owner_decl, + .decl_ref => typed_value.val.castTag(.decl_ref).?.data, + .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, + else => unreachable, + }, + code, + debug_output, + reloc_info, + ), + .slice => { + const slice = typed_value.val.castTag(.slice).?.data; - // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = slice.ptr, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + // generate ptr + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = slice_ptr_field_type, + .val = slice.ptr, + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = slice.len, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + // generate length + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = Type.usize, + .val = slice.len, + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } - return Result.ok; - }, - .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( - bin_file, - src_loc, - typed_value, - typed_value.val, - code, - debug_output, - reloc_info, - ), - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, + return Result.ok; + }, + .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( + bin_file, src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, + typed_value, + typed_value.val, + code, + debug_output, + reloc_info, ), + else => return Result{ + .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement generateSymbol for pointer type value: '{s}'", + .{@tagName(typed_value.val.tag())}, + ), + }, }, + else => unreachable, }, .Int => { const info = typed_value.ty.intInfo(mod); @@ -652,7 +655,7 @@ pub fn generateSymbol( } const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; - const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef); + const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.undef; switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, .val = value, @@ -696,7 +699,7 @@ pub fn generateSymbol( // emit payload part of the error union { const begin = code.items.len; - const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef); + const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.undef; switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_ty, .val = payload_val, @@ -1189,16 +1192,17 @@ pub fn genTypedValue( .Void => return GenResult.mcv(.none), .Pointer => switch (typed_value.ty.ptrSize(mod)) { .Slice => {}, - else => { - 
switch (typed_value.val.tag()) { - .null_value => { - return GenResult.mcv(.{ .immediate = 0 }); - }, + else => switch (typed_value.val.ip_index) { + .null_value => { + return GenResult.mcv(.{ .immediate = 0 }); + }, + .none => switch (typed_value.val.tag()) { .int_u64 => { return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, - } + }, + else => {}, }, }, .Int => { @@ -1216,7 +1220,7 @@ pub fn genTypedValue( }, .Optional => { if (typed_value.ty.isPtrLikeOptional(mod)) { - if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); + if (typed_value.val.ip_index == .null_value) return GenResult.mcv(.{ .immediate = 0 }); return genTypedValue(bin_file, src_loc, .{ .ty = typed_value.ty.optionalChild(mod), diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e6ec461e4311..cd3974bc91d8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1045,8 +1045,8 @@ pub const DeclGen = struct { if (!empty) try writer.writeByte(')'); return; }, - .Pointer => switch (val.tag()) { - .null_value, .zero => if (ty.isSlice(mod)) { + .Pointer => switch (val.ip_index) { + .null_value => if (ty.isSlice(mod)) { var slice_pl = Value.Payload.Slice{ .base = .{ .tag = .slice }, .data = .{ .ptr = val, .len = Value.undef }, @@ -1059,46 +1059,63 @@ pub const DeclGen = struct { try dg.renderType(writer, ty); try writer.writeAll(")NULL)"); }, - .variable => { - const decl = val.castTag(.variable).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .slice => { - if (!location.isInitializer()) { - try writer.writeByte('('); + .none => switch (val.tag()) { + .zero => if (ty.isSlice(mod)) { + var slice_pl = Value.Payload.Slice{ + .base = .{ .tag = .slice }, + .data = .{ .ptr = val, .len = Value.undef }, + }; + const slice_val = Value.initPayload(&slice_pl.base); + + return dg.renderValue(writer, ty, slice_val, location); + } else { + try writer.writeAll("(("); try dg.renderType(writer, ty); - try writer.writeByte(')'); - } + try writer.writeAll(")NULL)"); + }, + .variable => { + const decl = val.castTag(.variable).?.data.owner_decl; + return dg.renderDeclValue(writer, ty, val, decl, location); + }, + .slice => { + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } - const slice = val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const slice = val.castTag(.slice).?.data; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type); - try writer.writeAll(", "); - try dg.renderValue(writer, Type.usize, slice.len, initializer_type); - try writer.writeByte('}'); - }, - .function => { - const func = val.castTag(.function).?.data; - try dg.renderDeclName(writer, func.owner_decl, 0); - }, - .extern_fn => { - const extern_fn = val.castTag(.extern_fn).?.data; - try dg.renderDeclName(writer, extern_fn.owner_decl, 0); - }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + try writer.writeByte('{'); + try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type); + try writer.writeAll(", "); + try dg.renderValue(writer, Type.usize, slice.len, initializer_type); + try writer.writeByte('}'); + }, + .function => { + const func = 
val.castTag(.function).?.data; + try dg.renderDeclName(writer, func.owner_decl, 0); + }, + .extern_fn => { + const extern_fn = val.castTag(.extern_fn).?.data; + try dg.renderDeclName(writer, extern_fn.owner_decl, 0); + }, + .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + .field_ptr, + .elem_ptr, + .opt_payload_ptr, + .eu_payload_ptr, + .decl_ref_mut, + .decl_ref, + => try dg.renderParentPtr(writer, val, ty, location), + + else => unreachable, }, - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), else => unreachable, }, .Array, .Vector => { @@ -1109,8 +1126,8 @@ pub const DeclGen = struct { } // First try specific tag representations for more efficiency. - switch (val.tag()) { - .undef, .empty_struct_value, .empty_array => { + switch (val.ip_index) { + .undef => { const ai = ty.arrayInfo(mod); try writer.writeByte('{'); if (ai.sentinel) |s| { @@ -1119,76 +1136,91 @@ pub const DeclGen = struct { try writer.writeByte('0'); } try writer.writeByte('}'); + return; }, - .bytes, .str_lit => |t| { - const bytes = switch (t) { - .bytes => val.castTag(.bytes).?.data, - .str_lit => bytes: { - const str_lit = val.castTag(.str_lit).?.data; - break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - }, - else => unreachable, - }; - const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; - try writer.print("{s}", .{ - fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), - }); - }, - else => { - // Fall back to generic implementation. 
- var arena = std.heap.ArenaAllocator.init(dg.gpa); - defer arena.deinit(); - const arena_allocator = arena.allocator(); - - // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal - const max_string_initializer_len = 65535; - - const ai = ty.arrayInfo(mod); - if (ai.elem_type.eql(Type.u8, dg.module)) { - if (ai.len <= max_string_initializer_len) { - var literal = stringLiteral(writer); - try literal.start(); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); - try literal.writeChar(elem_val_u8); - } - if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); - if (s_u8 != 0) try literal.writeChar(s_u8); - } - try literal.end(); - } else { - try writer.writeByte('{'); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); - try writer.print("'\\x{x}'", .{elem_val_u8}); - } - if (ai.sentinel) |s| { - if (index != 0) try writer.writeByte(','); - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } - try writer.writeByte('}'); - } - } else { + .none => switch (val.tag()) { + .empty_struct_value, .empty_array => { + const ai = ty.arrayInfo(mod); try writer.writeByte('{'); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); - } if (ai.sentinel) |s| { - if (index != 0) try writer.writeByte(','); try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } else { + try writer.writeByte('0'); } try writer.writeByte('}'); - } + return; + }, + .bytes, .str_lit => |t| { + const bytes = switch (t) { + .bytes => val.castTag(.bytes).?.data, + .str_lit => bytes: { + const str_lit = val.castTag(.str_lit).?.data; + break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + }, + else => unreachable, + }; + const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; + try writer.print("{s}", .{ + fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), + }); + return; + }, + else => {}, }, + else => {}, + } + // Fall back to generic implementation. 
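+            // This path materializes each element value in turn: u8 arrays short
+            // enough for MSVC are emitted as C string literals; everything else
+            // (including longer u8 arrays) becomes a brace initializer.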
+ var arena = std.heap.ArenaAllocator.init(dg.gpa); + defer arena.deinit(); + const arena_allocator = arena.allocator(); + + // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal + const max_string_initializer_len = 65535; + + const ai = ty.arrayInfo(mod); + if (ai.elem_type.eql(Type.u8, dg.module)) { + if (ai.len <= max_string_initializer_len) { + var literal = stringLiteral(writer); + try literal.start(); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try literal.writeChar(elem_val_u8); + } + if (ai.sentinel) |s| { + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); + if (s_u8 != 0) try literal.writeChar(s_u8); + } + try literal.end(); + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try writer.print("'\\x{x}'", .{elem_val_u8}); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(dg.module, arena_allocator, index); + try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); } }, .Bool => { @@ -1201,7 +1233,7 @@ pub const DeclGen = struct { .Optional => { const payload_ty = ty.optionalChild(mod); - const is_null_val = Value.makeBool(val.tag() == .null_value); + const is_null_val = Value.makeBool(val.ip_index == .null_value); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return dg.renderValue(writer, Type.bool, is_null_val, location); @@ -7765,7 +7797,7 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) T if (lowersToArray(ret_ty, mod)) { buffer.names = [1][]const u8{"array"}; buffer.types = [1]Type{ret_ty}; - buffer.values = [1]Value{Value.initTag(.unreachable_value)}; + buffer.values = [1]Value{Value.@"unreachable"}; buffer.payload = .{ .data = .{ .names = &buffer.names, .types = &buffer.types, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index f45a63df7271..558534a651ee 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2028,7 +2028,7 @@ pub const Object = struct { for (tuple.types, 0..) 
|field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; const field_size = field_ty.abiSize(mod); const field_align = field_ty.abiAlignment(mod); @@ -2498,7 +2498,7 @@ pub const DeclGen = struct { global.setGlobalConstant(.True); break :init_val decl.val; }; - if (init_val.tag() != .unreachable_value) { + if (init_val.ip_index != .unreachable_value) { const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val }); if (global.globalGetValueType() == llvm_init.typeOf()) { global.setInitializer(llvm_init); @@ -2954,7 +2954,7 @@ pub const DeclGen = struct { for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); @@ -3359,58 +3359,65 @@ pub const DeclGen = struct { else => unreachable, } }, - .Pointer => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - .variable => { - const decl_index = tv.val.castTag(.variable).?.data.owner_decl; - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const val = try dg.resolveGlobalDecl(decl_index); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - return addrspace_casted_ptr; - }, - .slice => { - const slice = tv.val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const fields: [2]*llvm.Value = .{ - try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(&buf), - .val = slice.ptr, - }), - try dg.lowerValue(.{ - .ty = Type.usize, - .val = slice.len, - }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); - return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); - }, - .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); - }, - .null_value, .zero => { + .Pointer => switch (tv.val.ip_index) { + .null_value => { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.constNull(); }, - .opt_payload => { - const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); + .none => switch (tv.val.tag()) { + .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), + .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), + .variable => { + const decl_index = tv.val.castTag(.variable).?.data.owner_decl; + const decl = dg.module.declPtr(decl_index); + dg.module.markDeclAlive(decl); + + const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); + const 
llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); + + const val = try dg.resolveGlobalDecl(decl_index); + const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) + val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) + else + val; + return addrspace_casted_ptr; + }, + .slice => { + const slice = tv.val.castTag(.slice).?.data; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const fields: [2]*llvm.Value = .{ + try dg.lowerValue(.{ + .ty = tv.ty.slicePtrFieldType(&buf), + .val = slice.ptr, + }), + try dg.lowerValue(.{ + .ty = Type.usize, + .val = slice.len, + }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + const llvm_usize = try dg.lowerType(Type.usize); + const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); + return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); + }, + .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { + return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); + }, + .zero => { + const llvm_type = try dg.lowerType(tv.ty); + return llvm_type.constNull(); + }, + .opt_payload => { + const payload = tv.val.castTag(.opt_payload).?.data; + return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); + }, + else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ + tv.ty.fmtDebug(), tag, + }), }, - else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ - tv.ty.fmtDebug(), tag, - }), + else => unreachable, }, .Array => switch (tv.val.tag()) { .bytes => { @@ -3555,7 +3562,7 @@ pub const DeclGen = struct { var fields_buf: [3]*llvm.Value = undefined; fields_buf[0] = try dg.lowerValue(.{ .ty = payload_ty, - .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef), + .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.undef, }); fields_buf[1] = non_null_bit; if (llvm_field_count > 2) { @@ -3606,7 +3613,7 @@ pub const DeclGen = struct { }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), + .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.undef, }); var fields_buf: [3]*llvm.Value = undefined; @@ -3645,7 +3652,7 @@ pub const DeclGen = struct { var need_unnamed = false; for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value) continue; + if (tuple.values[i].ip_index != .unreachable_value) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const field_align = field_ty.abiAlignment(mod); @@ -10501,7 +10508,7 @@ fn llvmFieldIndex( const tuple = ty.tupleFields(); var llvm_field_index: c_uint = 0; for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + if (tuple.values[i].ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); @@ -11117,7 +11124,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { const tuple = ty.tupleFields(); var count: usize = 0; for (tuple.values, 0..) 
|field_val, i| { - if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; + if (field_val.ip_index != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; count += 1; if (count > max_fields_byval) return true; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 9de2c03142c8..5fa81d19ff54 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -674,7 +674,7 @@ pub const DeclGen = struct { try self.lower(ptr_ty, slice.ptr); try self.addInt(Type.usize, slice.len); }, - .null_value, .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)), + .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)), .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { try self.addInt(Type.usize, val); }, @@ -813,7 +813,8 @@ pub const DeclGen = struct { const error_size = Type.anyerror.abiAlignment(mod); const ty_size = ty.abiSize(mod); const padding = ty_size - payload_size - error_size; - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); + + const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; if (eu_layout.error_first) { try self.lower(Type.anyerror, error_val); @@ -1021,7 +1022,7 @@ pub const DeclGen = struct { return try self.constant(Type.anyerror, error_val, repr); } - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); + const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; var members: [2]IdRef = undefined; if (eu_layout.error_first) { @@ -1292,7 +1293,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field_ty, .indirect); member_index += 1; @@ -1596,7 +1597,7 @@ pub const DeclGen = struct { else decl.val; - if (init_val.tag() == .unreachable_value) { + if (init_val.ip_index == .unreachable_value) { return self.todo("importing extern variables", .{}); } diff --git a/src/type.zig b/src/type.zig index 1f970919c922..8cffddb31cb9 100644 --- a/src/type.zig +++ b/src/type.zig @@ -533,14 +533,14 @@ pub const Type = struct { for (a_tuple.values, 0..) |a_val, i| { const ty = a_tuple.types[i]; const b_val = b_tuple.values[i]; - if (a_val.tag() == .unreachable_value) { - if (b_val.tag() == .unreachable_value) { + if (a_val.ip_index == .unreachable_value) { + if (b_val.ip_index == .unreachable_value) { continue; } else { return false; } } else { - if (b_val.tag() == .unreachable_value) { + if (b_val.ip_index == .unreachable_value) { return false; } else { if (!Value.eql(a_val, b_val, ty, mod)) return false; @@ -569,14 +569,14 @@ pub const Type = struct { for (a_struct_obj.values, 0..) |a_val, i| { const ty = a_struct_obj.types[i]; const b_val = b_struct_obj.values[i]; - if (a_val.tag() == .unreachable_value) { - if (b_val.tag() == .unreachable_value) { + if (a_val.ip_index == .unreachable_value) { + if (b_val.ip_index == .unreachable_value) { continue; } else { return false; } } else { - if (b_val.tag() == .unreachable_value) { + if (b_val.ip_index == .unreachable_value) { return false; } else { if (!Value.eql(a_val, b_val, ty, mod)) return false; @@ -750,7 +750,7 @@ pub const Type = struct { for (tuple.types, 0..) 
|field_ty, i| { hashWithHasher(field_ty, hasher, mod); const field_val = tuple.values[i]; - if (field_val.tag() == .unreachable_value) continue; + if (field_val.ip_index == .unreachable_value) continue; field_val.hash(field_ty, hasher, mod); } }, @@ -764,7 +764,7 @@ pub const Type = struct { const field_val = struct_obj.values[i]; hasher.update(field_name); hashWithHasher(field_ty, hasher, mod); - if (field_val.tag() == .unreachable_value) continue; + if (field_val.ip_index == .unreachable_value) continue; field_val.hash(field_ty, hasher, mod); } }, @@ -1139,11 +1139,11 @@ pub const Type = struct { for (tuple.types, 0..) |field_ty, i| { if (i != 0) try writer.writeAll(", "); const val = tuple.values[i]; - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.writeAll("comptime "); } try field_ty.dump("", .{}, writer); - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.print(" = {}", .{val.fmtDebug()}); } } @@ -1156,13 +1156,13 @@ pub const Type = struct { for (anon_struct.types, 0..) |field_ty, i| { if (i != 0) try writer.writeAll(", "); const val = anon_struct.values[i]; - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.writeAll("comptime "); } try writer.writeAll(anon_struct.names[i]); try writer.writeAll(": "); try field_ty.dump("", .{}, writer); - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.print(" = {}", .{val.fmtDebug()}); } } @@ -1408,11 +1408,11 @@ pub const Type = struct { for (tuple.types, 0..) |field_ty, i| { if (i != 0) try writer.writeAll(", "); const val = tuple.values[i]; - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.writeAll("comptime "); } try print(field_ty, writer, mod); - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); } } @@ -1425,7 +1425,7 @@ pub const Type = struct { for (anon_struct.types, 0..) |field_ty, i| { if (i != 0) try writer.writeAll(", "); const val = anon_struct.values[i]; - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.writeAll("comptime "); } try writer.writeAll(anon_struct.names[i]); @@ -1433,7 +1433,7 @@ pub const Type = struct { try print(field_ty, writer, mod); - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); } } @@ -1770,7 +1770,7 @@ pub const Type = struct { const tuple = ty.tupleFields(); for (tuple.types, 0..) |field_ty, i| { const val = tuple.values[i]; - if (val.tag() != .unreachable_value) continue; // comptime field + if (val.ip_index != .unreachable_value) continue; // comptime field if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } return false; @@ -2283,7 +2283,7 @@ pub const Type = struct { var big_align: u32 = 0; for (tuple.types, 0..) 
|field_ty, i| { const val = tuple.values[i]; - if (val.tag() != .unreachable_value) continue; // comptime field + if (val.ip_index != .unreachable_value) continue; // comptime field if (!(field_ty.hasRuntimeBits(mod))) continue; switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { @@ -3845,7 +3845,7 @@ pub const Type = struct { => return null, .void => return Value.void, - .noreturn => return Value.initTag(.unreachable_value), + .noreturn => return Value.@"unreachable", .null => return Value.null, .undefined => return Value.undef, @@ -3896,7 +3896,7 @@ pub const Type = struct { .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.values, 0..) |val, i| { - const is_comptime = val.tag() != .unreachable_value; + const is_comptime = val.ip_index != .unreachable_value; if (is_comptime) continue; if (tuple.types[i].onePossibleValue(mod) != null) continue; return null; @@ -3919,7 +3919,7 @@ pub const Type = struct { return null; } switch (enum_full.fields.count()) { - 0 => return Value.initTag(.unreachable_value), + 0 => return Value.@"unreachable", 1 => if (enum_full.values.count() == 0) { return Value.zero; // auto-numbered } else { @@ -3931,7 +3931,7 @@ pub const Type = struct { .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { - 0 => return Value.initTag(.unreachable_value), + 0 => return Value.@"unreachable", 1 => return Value.zero, else => return null, } @@ -3947,7 +3947,7 @@ pub const Type = struct { .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null; - if (union_obj.fields.count() == 0) return Value.initTag(.unreachable_value); + if (union_obj.fields.count() == 0) return Value.@"unreachable"; const only_field = union_obj.fields.values()[0]; const val_val = only_field.ty.onePossibleValue(mod) orelse return null; _ = tag_val; @@ -4075,7 +4075,7 @@ pub const Type = struct { .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].tag() != .unreachable_value; + const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; } return false; @@ -4514,7 +4514,7 @@ pub const Type = struct { .tuple => { const tuple = ty.castTag(.tuple).?.data; const val = tuple.values[index]; - if (val.tag() == .unreachable_value) { + if (val.ip_index == .unreachable_value) { return tuple.types[index].onePossibleValue(mod); } else { return val; @@ -4523,7 +4523,7 @@ pub const Type = struct { .anon_struct => { const anon_struct = ty.castTag(.anon_struct).?.data; const val = anon_struct.values[index]; - if (val.tag() == .unreachable_value) { + if (val.ip_index == .unreachable_value) { return anon_struct.types[index].onePossibleValue(mod); } else { return val; @@ -4544,12 +4544,12 @@ pub const Type = struct { .tuple => { const tuple = ty.castTag(.tuple).?.data; const val = tuple.values[index]; - return val.tag() != .unreachable_value; + return val.ip_index != .unreachable_value; }, .anon_struct => { const anon_struct = ty.castTag(.anon_struct).?.data; const val = anon_struct.values[index]; - return val.tag() != .unreachable_value; + return val.ip_index != .unreachable_value; }, else => unreachable, } @@ -4647,7 +4647,7 @@ pub const Type = struct { for (tuple.types, 0..) 
|field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { // comptime field if (i == index) return offset; continue; diff --git a/src/value.zig b/src/value.zig index 6f7210c884cc..f1d706aa093d 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,13 +33,10 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. - undef, zero, one, - unreachable_value, /// The only possible value for a particular type, which is stored externally. the_only_possible_value, - null_value, empty_struct_value, empty_array, // See last_no_payload_tag below. @@ -132,14 +129,11 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .undef, .zero, .one, - .unreachable_value, .the_only_possible_value, .empty_struct_value, .empty_array, - .null_value, => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), .int_big_positive, @@ -287,13 +281,10 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .undef, .zero, .one, - .unreachable_value, .the_only_possible_value, .empty_array, - .null_value, .empty_struct_value, => unreachable, @@ -522,7 +513,7 @@ pub const Value = struct { ) !void { comptime assert(fmt.len == 0); if (start_val.ip_index != .none) { - try out_stream.print("(interned {d})", .{@enumToInt(start_val.ip_index)}); + try out_stream.print("(interned: {})", .{start_val.ip_index}); return; } var val = start_val; @@ -534,11 +525,8 @@ pub const Value = struct { .@"union" => { return out_stream.writeAll("(union value)"); }, - .null_value => return out_stream.writeAll("null"), - .undef => return out_stream.writeAll("undefined"), .zero => return out_stream.writeAll("0"), .one => return out_stream.writeAll("1"), - .unreachable_value => return out_stream.writeAll("unreachable"), .the_only_possible_value => return out_stream.writeAll("(the only possible value)"), .ty => return val.castTag(.ty).?.data.dump("", options, out_stream), .lazy_align => { @@ -811,8 +799,9 @@ pub const Value = struct { switch (val.ip_index) { .bool_false => return BigIntMutable.init(&space.limbs, 0).toConst(), .bool_true => return BigIntMutable.init(&space.limbs, 1).toConst(), + .undef => unreachable, + .null_value => return BigIntMutable.init(&space.limbs, 0).toConst(), .none => switch (val.tag()) { - .null_value, .zero, .the_only_possible_value, // i0, u0 => return BigIntMutable.init(&space.limbs, 0).toConst(), @@ -832,8 +821,6 @@ pub const Value = struct { .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(), .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(), - .undef => unreachable, - .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -880,6 +867,7 @@ pub const Value = struct { switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, + .undef => unreachable, .none => switch (val.tag()) { .zero, .the_only_possible_value, // i0, u0 @@ -892,8 +880,6 @@ pub const Value = struct { .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null, .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null, - .undef => unreachable, - .lazy_align => { const ty = 
val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -913,9 +899,9 @@ pub const Value = struct { else => return null, }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| return int.big_int.to(u64) catch null, - else => unreachable, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| int.big_int.to(u64) catch null, + else => null, }, } } @@ -930,6 +916,7 @@ pub const Value = struct { switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, + .undef => unreachable, .none => switch (val.tag()) { .zero, .the_only_possible_value, // i0, u0 @@ -951,7 +938,6 @@ pub const Value = struct { return @intCast(i64, ty.abiSize(mod)); }, - .undef => unreachable, else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { @@ -2032,8 +2018,7 @@ pub const Value = struct { const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) switch (a_tag) { - .undef => return true, - .null_value, .the_only_possible_value, .empty_struct_value => return true, + .the_only_possible_value, .empty_struct_value => return true, .enum_literal => { const a_name = a.castTag(.enum_literal).?.data; const b_name = b.castTag(.enum_literal).?.data; @@ -2162,9 +2147,7 @@ pub const Value = struct { return eqlAdvanced(a_union.val, active_field_ty, b_union.val, active_field_ty, mod, opt_sema); }, else => {}, - } else if (b_tag == .null_value or b_tag == .@"error") { - return false; - } else if (a_tag == .undef or b_tag == .undef) { + } else if (b_tag == .@"error") { return false; } @@ -2283,7 +2266,7 @@ pub const Value = struct { if (a_nan) return true; return a_float == b_float; }, - .Optional => if (a_tag != .null_value and b_tag == .opt_payload) { + .Optional => if (b_tag == .opt_payload) { var sub_pl: Payload.SubValue = .{ .base = .{ .tag = b.tag() }, .data = a, @@ -2301,7 +2284,7 @@ pub const Value = struct { }, else => {}, } - if (a_tag == .null_value or a_tag == .@"error") return false; + if (a_tag == .@"error") return false; return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq); } @@ -2642,7 +2625,6 @@ pub const Value = struct { .zero, .one, - .null_value, .int_u64, .int_i64, .int_big_positive, @@ -2717,102 +2699,108 @@ pub const Value = struct { arena: ?Allocator, buffer: *ElemValueBuffer, ) error{OutOfMemory}!Value { - switch (val.tag()) { - // This is the case of accessing an element of an undef array. + switch (val.ip_index) { .undef => return Value.undef, - .empty_array => unreachable, // out of bounds array index - .empty_struct_value => unreachable, // out of bounds array index + .none => switch (val.tag()) { + // This is the case of accessing an element of an undef array. + .empty_array => unreachable, // out of bounds array index + .empty_struct_value => unreachable, // out of bounds array index - .empty_array_sentinel => { - assert(index == 0); // The only valid index for an empty array with sentinel. - return val.castTag(.empty_array_sentinel).?.data; - }, + .empty_array_sentinel => { + assert(index == 0); // The only valid index for an empty array with sentinel. 
+ return val.castTag(.empty_array_sentinel).?.data; + }, - .bytes => { - const byte = val.castTag(.bytes).?.data[index]; - if (arena) |a| { - return Tag.int_u64.create(a, byte); - } else { - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = byte, - }; - return initPayload(&buffer.base); - } - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const byte = bytes[index]; - if (arena) |a| { - return Tag.int_u64.create(a, byte); - } else { - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = byte, - }; - return initPayload(&buffer.base); - } - }, + .bytes => { + const byte = val.castTag(.bytes).?.data[index]; + if (arena) |a| { + return Tag.int_u64.create(a, byte); + } else { + buffer.* = .{ + .base = .{ .tag = .int_u64 }, + .data = byte, + }; + return initPayload(&buffer.base); + } + }, + .str_lit => { + const str_lit = val.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const byte = bytes[index]; + if (arena) |a| { + return Tag.int_u64.create(a, byte); + } else { + buffer.* = .{ + .base = .{ .tag = .int_u64 }, + .data = byte, + }; + return initPayload(&buffer.base); + } + }, - // No matter the index; all the elements are the same! - .repeated => return val.castTag(.repeated).?.data, + // No matter the index; all the elements are the same! + .repeated => return val.castTag(.repeated).?.data, - .aggregate => return val.castTag(.aggregate).?.data[index], - .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer), + .aggregate => return val.castTag(.aggregate).?.data[index], + .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer), - .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer), - .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer), - .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer), - .elem_ptr => { - const data = val.castTag(.elem_ptr).?.data; - return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer); - }, - .field_ptr => { - const data = val.castTag(.field_ptr).?.data; - if (data.container_ptr.pointerDecl()) |decl_index| { - const container_decl = mod.declPtr(decl_index); - const field_type = data.container_ty.structFieldType(data.field_index); - const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); - return field_val.elemValueAdvanced(mod, index, arena, buffer); - } else unreachable; - }, + .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer), + .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer), + .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer), + .elem_ptr => { + const data = val.castTag(.elem_ptr).?.data; + return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer); + }, + .field_ptr => { + const data = val.castTag(.field_ptr).?.data; + if (data.container_ptr.pointerDecl()) |decl_index| { + const container_decl = mod.declPtr(decl_index); + const field_type = data.container_ty.structFieldType(data.field_index); + const field_val = 
container_decl.val.fieldValue(field_type, mod, data.field_index); + return field_val.elemValueAdvanced(mod, index, arena, buffer); + } else unreachable; + }, - // The child type of arrays which have only one possible value need - // to have only one possible value itself. - .the_only_possible_value => return val, + // The child type of arrays which have only one possible value need + // to have only one possible value itself. + .the_only_possible_value => return val, - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), + .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), + .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), - .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), - .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), + .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), + .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), + else => unreachable, + }, else => unreachable, } } /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod), - .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod), - .decl_ref => { - const decl = mod.declPtr(val.castTag(.decl_ref).?.data); - assert(decl.has_tv); - return decl.val.isVariable(mod); - }, - .decl_ref_mut => { - const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index); - assert(decl.has_tv); - return decl.val.isVariable(mod); - }, + return switch (val.ip_index) { + .none => switch (val.tag()) { + .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), + .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), + .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod), + .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod), + .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod), + .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod), + .decl_ref => { + const decl = mod.declPtr(val.castTag(.decl_ref).?.data); + assert(decl.has_tv); + return decl.val.isVariable(mod); + }, + .decl_ref_mut => { + const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index); + assert(decl.has_tv); + return decl.val.isVariable(mod); + }, - .variable => true, + .variable => true, + else => false, + }, else => false, }; } @@ -2878,39 +2866,46 @@ pub const Value = struct { } pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value { - switch (val.tag()) { - .aggregate => { - const field_values = val.castTag(.aggregate).?.data; - return field_values[index]; - }, - .@"union" => 
{ - const payload = val.castTag(.@"union").?.data; - // TODO assert the tag is correct - return payload.val; - }, + switch (val.ip_index) { + .undef => return Value.undef, + .none => switch (val.tag()) { + .aggregate => { + const field_values = val.castTag(.aggregate).?.data; + return field_values[index]; + }, + .@"union" => { + const payload = val.castTag(.@"union").?.data; + // TODO assert the tag is correct + return payload.val; + }, - .the_only_possible_value => return ty.onePossibleValue(mod).?, + .the_only_possible_value => return ty.onePossibleValue(mod).?, - .empty_struct_value => { - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - return tuple.values[index]; - } - if (ty.structFieldValueComptime(mod, index)) |some| { - return some; - } - unreachable; - }, - .undef => return Value.undef, + .empty_struct_value => { + if (ty.isSimpleTupleOrAnonStruct()) { + const tuple = ty.tupleFields(); + return tuple.values[index]; + } + if (ty.structFieldValueComptime(mod, index)) |some| { + return some; + } + unreachable; + }, + else => unreachable, + }, else => unreachable, } } pub fn unionTag(val: Value) Value { - switch (val.tag()) { - .undef, .enum_field_index => return val, - .@"union" => return val.castTag(.@"union").?.data.tag, + switch (val.ip_index) { + .undef => return val, + .none => switch (val.tag()) { + .enum_field_index => return val, + .@"union" => return val.castTag(.@"union").?.data.tag, + else => unreachable, + }, else => unreachable, } } @@ -2946,15 +2941,15 @@ pub const Value = struct { }); } - pub fn isUndef(self: Value) bool { - return self.tag() == .undef; + pub fn isUndef(val: Value) bool { + return val.ip_index == .undef; } /// TODO: check for cases such as array that is not marked undef but all the element /// values are marked undef, or struct that is not marked undef but all fields are marked /// undef, etc. - pub fn isUndefDeep(self: Value) bool { - return self.isUndef(); + pub fn isUndefDeep(val: Value) bool { + return val.isUndef(); } /// Returns true if any value contained in `self` is undefined. @@ -2962,27 +2957,29 @@ pub const Value = struct { /// values are marked undef, or struct that is not marked undef but all fields are marked /// undef, etc. pub fn anyUndef(self: Value, mod: *Module) bool { - switch (self.tag()) { - .slice => { - const payload = self.castTag(.slice).?; - const len = payload.data.len.toUnsignedInt(mod); - - var elem_value_buf: ElemValueBuffer = undefined; - var i: usize = 0; - while (i < len) : (i += 1) { - const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf); - if (elem_val.anyUndef(mod)) return true; - } - }, + switch (self.ip_index) { + .undef => return true, + .none => switch (self.tag()) { + .slice => { + const payload = self.castTag(.slice).?; + const len = payload.data.len.toUnsignedInt(mod); + + var elem_value_buf: ElemValueBuffer = undefined; + var i: usize = 0; + while (i < len) : (i += 1) { + const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf); + if (elem_val.anyUndef(mod)) return true; + } + }, - .aggregate => { - const payload = self.castTag(.aggregate).?; - for (payload.data) |val| { - if (val.anyUndef(mod)) return true; - } + .aggregate => { + const payload = self.castTag(.aggregate).?; + for (payload.data) |val| { + if (val.anyUndef(mod)) return true; + } + }, + else => {}, }, - - .undef => return true, else => {}, } @@ -2992,30 +2989,33 @@ pub const Value = struct { /// Asserts the value is not undefined and not unreachable. 
/// Integer value 0 is considered null because of C pointers. pub fn isNull(self: Value, mod: *const Module) bool { - return switch (self.tag()) { + return switch (self.ip_index) { + .undef => unreachable, + .unreachable_value => unreachable, .null_value => true, - .opt_payload => false, + .none => switch (self.tag()) { + .opt_payload => false, - // If it's not one of those two tags then it must be a C pointer value, - // in which case the value 0 is null and other values are non-null. + // If it's not one of those two tags then it must be a C pointer value, + // in which case the value 0 is null and other values are non-null. - .zero, - .the_only_possible_value, - => true, + .zero, + .the_only_possible_value, + => true, - .one => false, + .one => false, - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, - => self.orderAgainstZero(mod).compare(.eq), + .int_u64, + .int_i64, + .int_big_positive, + .int_big_negative, + => self.orderAgainstZero(mod).compare(.eq), - .undef => unreachable, - .unreachable_value => unreachable, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, + else => false, + }, else => false, }; } @@ -3025,18 +3025,21 @@ pub const Value = struct { /// something is an error or not because it works without having to figure out the /// string. pub fn getError(self: Value) ?[]const u8 { - return switch (self.tag()) { - .@"error" => self.castTag(.@"error").?.data.name, - .int_u64 => @panic("TODO"), - .int_i64 => @panic("TODO"), - .int_big_positive => @panic("TODO"), - .int_big_negative => @panic("TODO"), - .one => @panic("TODO"), + return switch (self.ip_index) { .undef => unreachable, .unreachable_value => unreachable, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, + .none => switch (self.tag()) { + .@"error" => self.castTag(.@"error").?.data.name, + .int_u64 => @panic("TODO"), + .int_i64 => @panic("TODO"), + .int_big_positive => @panic("TODO"), + .int_big_negative => @panic("TODO"), + .one => @panic("TODO"), + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, + else => null, + }, else => null, }; } @@ -3044,13 +3047,16 @@ pub const Value = struct { /// Assumes the type is an error union. Returns true if and only if the value is /// the error union payload, not an error. pub fn errorUnionIsPayload(val: Value) bool { - return switch (val.tag()) { - .eu_payload => true, - else => false, - + return switch (val.ip_index) { .undef => unreachable, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, + .none => switch (val.tag()) { + .eu_payload => true, + else => false, + + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, + }, + else => false, }; } @@ -3065,17 +3071,20 @@ pub const Value = struct { /// Valid for all types. Asserts the value is not undefined. 
pub fn isFloat(self: Value) bool { - return switch (self.tag()) { + return switch (self.ip_index) { .undef => unreachable, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, + .none => switch (self.tag()) { + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, - .float_16, - .float_32, - .float_64, - .float_80, - .float_128, - => true, + .float_16, + .float_32, + .float_64, + .float_80, + .float_128, + => true, + else => false, + }, else => false, }; } @@ -3102,40 +3111,44 @@ pub const Value = struct { pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { const target = mod.getTarget(); - switch (val.tag()) { - .undef, .zero, .one => return val, - .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 - .int_u64 => { - return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target); - }, - .int_i64 => { - return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target); - }, - .int_big_positive => { - const limbs = val.castTag(.int_big_positive).?.data; - const float = bigIntToFloat(limbs, true); - return floatToValue(float, arena, float_ty, target); - }, - .int_big_negative => { - const limbs = val.castTag(.int_big_negative).?.data; - const float = bigIntToFloat(limbs, false); - return floatToValue(float, arena, float_ty, target); - }, - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); - } else { - return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target); - } - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); - } else { - return intToFloatInner(ty.abiSize(mod), arena, float_ty, target); - } + switch (val.ip_index) { + .undef => return val, + .none => switch (val.tag()) { + .zero, .one => return val, + .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 + .int_u64 => { + return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target); + }, + .int_i64 => { + return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target); + }, + .int_big_positive => { + const limbs = val.castTag(.int_big_positive).?.data; + const float = bigIntToFloat(limbs, true); + return floatToValue(float, arena, float_ty, target); + }, + .int_big_negative => { + const limbs = val.castTag(.int_big_negative).?.data; + const float = bigIntToFloat(limbs, false); + return floatToValue(float, arena, float_ty, target); + }, + .lazy_align => { + const ty = val.castTag(.lazy_align).?.data; + if (opt_sema) |sema| { + return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); + } else { + return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target); + } + }, + .lazy_size => { + const ty = val.castTag(.lazy_size).?.data; + if (opt_sema) |sema| { + return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); + } else { + return intToFloatInner(ty.abiSize(mod), arena, float_ty, target); + } + }, + else => unreachable, }, else => unreachable, } @@ -3381,7 +3394,7 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or 
rhs.isUndef()) return Value.undef; if (ty.zigTypeTag(mod) == .ComptimeInt) { return intMul(lhs, rhs, ty, arena, mod); @@ -3492,7 +3505,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (val.isUndef()) return Value.initTag(.undef); + if (val.isUndef()) return Value.undef; const info = ty.intInfo(mod); @@ -3532,7 +3545,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3568,7 +3581,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); @@ -3598,7 +3611,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3633,7 +3646,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
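
The hunks above replace every payload-allocating `Value.initTag(.undef)` with a named constant, and the next hunk defines those constants as plain InternPool indices. A toy sketch of why that makes sentinel values free to construct and compare (types simplified; the real `Value` also carries the `legacy` union):

    const std = @import("std");

    // Simplified model: a sentinel Value is just a well-known pool index, so
    // the constants below are comptime values and need no allocation.
    const IpIndex = enum(u32) { none, undef, null_value, unreachable_value };

    const Value = struct {
        ip_index: IpIndex,

        pub const undef: Value = .{ .ip_index = .undef };
        pub const @"null": Value = .{ .ip_index = .null_value };
        pub const @"unreachable": Value = .{ .ip_index = .unreachable_value };

        // `isUndef` degenerates to an integer comparison instead of a tag check.
        pub fn isUndef(val: Value) bool {
            return val.ip_index == .undef;
        }
    };

    pub fn main() void {
        std.debug.print("{}\n", .{Value.undef.isUndef()}); // true
        std.debug.print("{}\n", .{Value.@"null".isUndef()}); // false
    }
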
@@ -5393,11 +5406,12 @@ pub const Value = struct { .ip_index = .none, .legacy = .{ .ptr_otherwise = &negative_one_payload.base }, }; - pub const undef = initTag(.undef); + pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; - pub const @"null" = initTag(.null_value); + pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined }; pub const @"true": Value = .{ .ip_index = .bool_true, .legacy = undefined }; + pub const @"unreachable": Value = .{ .ip_index = .unreachable_value, .legacy = undefined }; pub const generic_poison: Value = .{ .ip_index = .generic_poison, .legacy = undefined }; pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined }; From 41cdcd5486ba10dcd21dc45cb8470c556b7497dd Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 May 2023 21:57:55 -0700 Subject: [PATCH 022/205] stage2: add a few more Value checks for InternPool --- src/Module.zig | 9 ++++++--- src/type.zig | 2 +- src/value.zig | 34 ++++++++++++++++++++-------------- 3 files changed, 27 insertions(+), 18 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index b1cbd8829726..bfc06ac5abec 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4903,9 +4903,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.tag()) { - .function, .extern_fn => .function, - .variable => .variable, + const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.ip_index) { + .none => switch (decl_tv.val.tag()) { + .function, .extern_fn => .function, + .variable => .variable, + else => .constant, + }, else => .constant, }; diff --git a/src/type.zig b/src/type.zig index 8cffddb31cb9..4840bca6e7f6 100644 --- a/src/type.zig +++ b/src/type.zig @@ -252,7 +252,7 @@ pub const Type = struct { } pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - assert(self.ip_index == .none); + if (self.ip_index != .none) return null; if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; diff --git a/src/value.zig b/src/value.zig index f1d706aa093d..c2c37ba68b00 100644 --- a/src/value.zig +++ b/src/value.zig @@ -258,7 +258,7 @@ pub const Value = struct { } pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { - assert(self.ip_index == .none); + if (self.ip_index != .none) return null; if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; @@ -2806,24 +2806,30 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (val.tag()) { - .variable => false, + return switch (val.ip_index) { + .none => switch (val.tag()) { + .variable => false, + else => val.isPtrToThreadLocalInner(mod), + }, else => val.isPtrToThreadLocalInner(mod), }; } fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool { - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isPtrToThreadLocalInner(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isPtrToThreadLocalInner(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isPtrToThreadLocalInner(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .eu_payload_ptr => 
val.castTag(.eu_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .decl_ref => mod.declPtr(val.castTag(.decl_ref).?.data).val.isPtrToThreadLocalInner(mod), - .decl_ref_mut => mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.isPtrToThreadLocalInner(mod), - - .variable => val.castTag(.variable).?.data.is_threadlocal, + return switch (val.ip_index) { + .none => switch (val.tag()) { + .slice => val.castTag(.slice).?.data.ptr.isPtrToThreadLocalInner(mod), + .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isPtrToThreadLocalInner(mod), + .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isPtrToThreadLocalInner(mod), + .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), + .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), + .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), + .decl_ref => mod.declPtr(val.castTag(.decl_ref).?.data).val.isPtrToThreadLocalInner(mod), + .decl_ref_mut => mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.isPtrToThreadLocalInner(mod), + + .variable => val.castTag(.variable).?.data.is_threadlocal, + else => false, + }, else => false, }; } From 75cf06c187d9d0288c2ea31f34b18c3a7da4bd1d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 13:26:01 -0700 Subject: [PATCH 023/205] std.mem.alignForwardGeneric: manually inline the assertions This matches more directly the documentation comments, and makes it more obvious what went wrong when an assertion fails. --- lib/std/mem.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 212d09a1a839..d6ca4a9ea1b3 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -4226,7 +4226,8 @@ pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { /// The alignment must be a power of 2 and greater than 0. /// Asserts that rounding up the address does not cause integer overflow. pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T { - assert(isValidAlignGeneric(T, alignment)); + assert(alignment > 0); + assert(std.math.isPowerOfTwo(alignment)); return alignBackwardGeneric(T, addr + (alignment - 1), alignment); } From ac07ddadeb36019c262b4f28a4fa3884f6f50b32 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 13:26:58 -0700 Subject: [PATCH 024/205] InternPool: enhance integer values The Key struct now has a Storage tagged union which can store a u64, i64, or big int. This is needed so that indexToKey can be implemented for integers stored compactly in the data structure. --- src/InternPool.zig | 244 ++++++++++++++++++++++++++++++--------------- src/Sema.zig | 6 +- src/value.zig | 142 ++++++++++++++++---------- 3 files changed, 257 insertions(+), 135 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 7328c74b4f4b..1da0572bd4c4 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -11,6 +11,7 @@ const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; const InternPool = @This(); const DeclIndex = enum(u32) { _ }; @@ -50,10 +51,7 @@ pub const Key = union(enum) { /// Index into the string table bytes. 
lib_name: u32, }, - int: struct { - ty: Index, - big_int: BigIntConst, - }, + int: Key.Int, enum_tag: struct { ty: Index, tag: BigIntConst, @@ -110,6 +108,32 @@ pub const Key = union(enum) { child: Index, }; + pub const Int = struct { + ty: Index, + storage: Storage, + + pub const Storage = union(enum) { + u64: u64, + i64: i64, + big_int: BigIntConst, + + /// Big enough to fit any non-BigInt value + pub const BigIntSpace = struct { + /// The +1 is headroom so that operations such as incrementing once + /// or decrementing once are possible without using an allocator. + limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb, + }; + + pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst { + return switch (storage) { + .big_int => |x| x, + .u64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + }; + } + }; + }; + pub fn hash32(key: Key) u32 { return @truncate(u32, key.hash64()); } @@ -137,9 +161,13 @@ pub const Key = union(enum) { => |info| std.hash.autoHash(hasher, info), .int => |int| { + // Canonicalize all integers by converting them to BigIntConst. + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + std.hash.autoHash(hasher, int.ty); - std.hash.autoHash(hasher, int.big_int.positive); - for (int.big_int.limbs) |limb| std.hash.autoHash(hasher, limb); + std.hash.autoHash(hasher, big_int.positive); + for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb); }, .enum_tag => |enum_tag| { @@ -573,42 +601,27 @@ pub const static_keys = [_]Key{ .{ .int = .{ .ty = .comptime_int_type, - .big_int = .{ - .limbs = &.{0}, - .positive = true, - }, + .storage = .{ .u64 = 0 }, } }, .{ .int = .{ .ty = .usize_type, - .big_int = .{ - .limbs = &.{0}, - .positive = true, - }, + .storage = .{ .u64 = 0 }, } }, .{ .int = .{ .ty = .u8_type, - .big_int = .{ - .limbs = &.{0}, - .positive = true, - }, + .storage = .{ .u64 = 0 }, } }, .{ .int = .{ .ty = .comptime_int_type, - .big_int = .{ - .limbs = &.{1}, - .positive = true, - }, + .storage = .{ .u64 = 1 }, } }, .{ .int = .{ .ty = .usize_type, - .big_int = .{ - .limbs = &.{1}, - .positive = true, - }, + .storage = .{ .u64 = 1 }, } }, .{ .enum_tag = .{ @@ -680,19 +693,19 @@ pub const Tag = enum(u8) { simple_internal, /// Type: u32 /// data is integer value - int_small_u32, + int_u32, /// Type: i32 /// data is integer value bitcasted to u32. - int_small_i32, + int_i32, /// A usize that fits in 32 bits. /// data is integer value. - int_small_usize, + int_usize, /// A comptime_int that fits in a u32. /// data is integer value. - int_small_comptime_unsigned, + int_comptime_int_u32, /// A comptime_int that fits in an i32. /// data is integer value bitcasted to u32. - int_small_comptime_signed, + int_comptime_int_i32, /// A positive integer value. /// data is a limbs index to Int. 
int_positive,
@@ -932,11 +945,26 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
 .type_error_union => @panic("TODO"),
 .type_enum_simple => @panic("TODO"),
 .simple_internal => @panic("TODO"),
- .int_small_u32 => @panic("TODO"),
- .int_small_i32 => @panic("TODO"),
- .int_small_usize => @panic("TODO"),
- .int_small_comptime_unsigned => @panic("TODO"),
- .int_small_comptime_signed => @panic("TODO"),
+ .int_u32 => return .{ .int = .{
+ .ty = .u32_type,
+ .storage = .{ .u64 = data },
+ } },
+ .int_i32 => return .{ .int = .{
+ .ty = .i32_type,
+ .storage = .{ .i64 = @bitCast(i32, data) },
+ } },
+ .int_usize => return .{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = data },
+ } },
+ .int_comptime_int_u32 => return .{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .u64 = data },
+ } },
+ .int_comptime_int_i32 => return .{ .int = .{
+ .ty = .comptime_int_type,
+ .storage = .{ .i64 = @bitCast(i32, data) },
+ } },
 .int_positive => @panic("TODO"),
 .int_negative => @panic("TODO"),
 .enum_tag_positive => @panic("TODO"),
@@ -1041,54 +1069,114 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
 .int => |int| b: {
 switch (int.ty) {
- .u32_type => {
- if (int.big_int.fits(u32)) {
- ip.items.appendAssumeCapacity(.{
- .tag = .int_small_u32,
- .data = int.big_int.to(u32) catch unreachable,
- });
- break :b;
- }
+ .u32_type => switch (int.storage) {
+ .big_int => |big_int| {
+ if (big_int.to(u32)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_u32,
+ .data = casted,
+ });
+ break :b;
+ } else |_| {}
+ },
+ inline .u64, .i64 => |x| {
+ if (std.math.cast(u32, x)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_u32,
+ .data = casted,
+ });
+ break :b;
+ }
+ },
 },
- .i32_type => {
- if (int.big_int.fits(i32)) {
- ip.items.appendAssumeCapacity(.{
- .tag = .int_small_i32,
- .data = @bitCast(u32, int.big_int.to(i32) catch unreachable),
- });
- break :b;
- }
+ .i32_type => switch (int.storage) {
+ .big_int => |big_int| {
+ if (big_int.to(i32)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_i32,
+ .data = @bitCast(u32, casted),
+ });
+ break :b;
+ } else |_| {}
+ },
+ inline .u64, .i64 => |x| {
+ if (std.math.cast(i32, x)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_i32,
+ .data = @bitCast(u32, casted),
+ });
+ break :b;
+ }
+ },
 },
- .usize_type => {
- if (int.big_int.fits(u32)) {
- ip.items.appendAssumeCapacity(.{
- .tag = .int_small_usize,
- .data = int.big_int.to(u32) catch unreachable,
- });
- break :b;
- }
+ .usize_type => switch (int.storage) {
+ .big_int => |big_int| {
+ if (big_int.to(u32)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_usize,
+ .data = casted,
+ });
+ break :b;
+ } else |_| {}
+ },
+ inline .u64, .i64 => |x| {
+ if (std.math.cast(u32, x)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_usize,
+ .data = casted,
+ });
+ break :b;
+ }
+ },
 },
- .comptime_int_type => {
- if (int.big_int.fits(u32)) {
- ip.items.appendAssumeCapacity(.{
- .tag = .int_small_comptime_unsigned,
- .data = int.big_int.to(u32) catch unreachable,
- });
- break :b;
- }
- if (int.big_int.fits(i32)) {
- ip.items.appendAssumeCapacity(.{
- .tag = .int_small_comptime_signed,
- .data = @bitCast(u32, int.big_int.to(i32) catch unreachable),
- });
- break :b;
- }
+ .comptime_int_type => switch (int.storage) {
+ .big_int => |big_int| {
+ if (big_int.to(u32)) |casted| {
+ ip.items.appendAssumeCapacity(.{
+ .tag = .int_comptime_int_u32,
+ .data = casted,
+ });
+ break :b;
+ } else |_| {}
+ if 
(big_int.to(i32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_i32, + .data = @bitCast(u32, casted), + }); + break :b; + } else |_| {} + }, + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_u32, + .data = casted, + }); + break :b; + } + if (std.math.cast(i32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_i32, + .data = @bitCast(u32, casted), + }); + break :b; + } + }, }, else => {}, } - - const tag: Tag = if (int.big_int.positive) .int_positive else .int_negative; - try addInt(ip, gpa, int.ty, tag, int.big_int.limbs); + switch (int.storage) { + .big_int => |big_int| { + const tag: Tag = if (big_int.positive) .int_positive else .int_negative; + try addInt(ip, gpa, int.ty, tag, big_int.limbs); + }, + inline .i64, .u64 => |x| { + var buf: [2]usize = undefined; + const big_int = BigIntMutable.init(&buf, x).toConst(); + const tag: Tag = if (big_int.positive) .int_positive else .int_negative; + try addInt(ip, gpa, int.ty, tag, big_int.limbs); + }, + } }, .enum_tag => |enum_tag| { diff --git a/src/Sema.zig b/src/Sema.zig index 3406d0d80f52..37d6f3e90234 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1981,7 +1981,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( if (air_tags[i] == .constant) { const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; - if (val.tag() == .variable) return val; + if (val.tagIsVariable()) return val; } return opv; } @@ -5033,7 +5033,7 @@ fn storeToInferredAllocComptime( // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: { - if (operand_val.tag() == .variable) break :store; + if (operand_val.tagIsVariable()) break :store; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl_index = try anon_decl.finish( @@ -28125,7 +28125,7 @@ fn beginComptimePtrLoad( const is_mutable = ptr_val.tag() == .decl_ref_mut; const decl = sema.mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); - if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; + if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ diff --git a/src/value.zig b/src/value.zig index c2c37ba68b00..588218f6703c 100644 --- a/src/value.zig +++ b/src/value.zig @@ -796,17 +796,17 @@ pub const Value = struct { mod: *const Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { - switch (val.ip_index) { - .bool_false => return BigIntMutable.init(&space.limbs, 0).toConst(), - .bool_true => return BigIntMutable.init(&space.limbs, 1).toConst(), + return switch (val.ip_index) { + .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), + .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), .undef => unreachable, - .null_value => return BigIntMutable.init(&space.limbs, 0).toConst(), + .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), .none => switch (val.tag()) { .zero, .the_only_possible_value, // i0, u0 - => return BigIntMutable.init(&space.limbs, 0).toConst(), + => BigIntMutable.init(&space.limbs, 0).toConst(), - .one => return BigIntMutable.init(&space.limbs, 1).toConst(), + .one => BigIntMutable.init(&space.limbs, 1).toConst(), .enum_field_index => { const index = val.castTag(.enum_field_index).?.data; @@ -816,10 
+816,10 @@ pub const Value = struct { const sub_val = val.castTag(.runtime_value).?.data; return sub_val.toBigIntAdvanced(space, mod, opt_sema); }, - .int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(), - .int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(), - .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(), - .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(), + .int_u64 => BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(), + .int_i64 => BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(), + .int_big_positive => val.castTag(.int_big_positive).?.asBigInt(), + .int_big_negative => val.castTag(.int_big_negative).?.asBigInt(), .lazy_align => { const ty = val.castTag(.lazy_align).?.data; @@ -849,10 +849,10 @@ pub const Value = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| return int.big_int, + .int => |int| int.storage.toBigInt(space), else => unreachable, }, - } + }; } /// If the value fits in a u64, return it, otherwise null. @@ -900,7 +900,11 @@ pub const Value = struct { else => return null, }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| int.big_int.to(u64) catch null, + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.to(u64) catch null, + .u64 => |x| x, + .i64 => |x| std.math.cast(u64, x), + }, else => null, }, } @@ -940,18 +944,22 @@ pub const Value = struct { else => unreachable, }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| return int.big_int.to(i64) catch unreachable, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.to(i64) catch unreachable, + .i64 => |x| x, + .u64 => |x| @intCast(i64, x), + }, else => unreachable, }, } } pub fn toBool(val: Value, mod: *const Module) bool { - switch (val.ip_index) { - .bool_true => return true, - .bool_false => return false, - .none => return switch (val.tag()) { + return switch (val.ip_index) { + .bool_true => true, + .bool_false => false, + .none => switch (val.tag()) { .one => true, .zero => false, @@ -968,10 +976,13 @@ pub const Value = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| return !int.big_int.eqZero(), + .int => |int| switch (int.storage) { + .big_int => |big_int| !big_int.eqZero(), + inline .u64, .i64 => |x| x != 0, + }, else => unreachable, }, - } + }; } fn isDeclRef(val: Value) bool { @@ -1483,12 +1494,12 @@ pub const Value = struct { pub fn clz(val: Value, ty: Type, mod: *const Module) u64 { const ty_bits = ty.intInfo(mod).bits; - switch (val.ip_index) { - .bool_false => return ty_bits, - .bool_true => return ty_bits - 1, + return switch (val.ip_index) { + .bool_false => ty_bits, + .bool_true => ty_bits - 1, .none => switch (val.tag()) { - .zero => return ty_bits, - .one => return ty_bits - 1, + .zero => ty_bits, + .one => ty_bits - 1, .int_u64 => { const big = @clz(val.castTag(.int_u64).?.data); @@ -1519,20 +1530,24 @@ pub const Value = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| return int.big_int.clz(ty_bits), + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.clz(ty_bits), + .u64 => |x| @clz(x) + ty_bits - 64, + .i64 => @panic("TODO implement i64 Value clz"), + }, else => 
unreachable, }, - } + }; } pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 { const ty_bits = ty.intInfo(mod).bits; - switch (val.ip_index) { - .bool_false => return ty_bits, - .bool_true => return 0, + return switch (val.ip_index) { + .bool_false => ty_bits, + .bool_true => 0, .none => switch (val.tag()) { - .zero => return ty_bits, - .one => return 0, + .zero => ty_bits, + .one => 0, .int_u64 => { const big = @ctz(val.castTag(.int_u64).?.data); @@ -1563,10 +1578,17 @@ pub const Value = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| return int.big_int.ctz(), + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.ctz(), + .u64 => |x| { + const big = @ctz(x); + return if (big == 64) ty_bits else big; + }, + .i64 => @panic("TODO implement i64 Value ctz"), + }, else => unreachable, }, - } + }; } pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 { @@ -1591,7 +1613,9 @@ pub const Value = struct { else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| { const info = ty.intInfo(mod); - return int.big_int.popCount(info.bits); + var buffer: Value.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return @intCast(u64, big_int.popCount(info.bits)); }, else => unreachable, }, @@ -1641,23 +1665,23 @@ pub const Value = struct { /// Returns the number of bits the value requires to represent stored in twos complement form. pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize { const target = mod.getTarget(); - switch (self.ip_index) { - .bool_false => return 0, - .bool_true => return 1, + return switch (self.ip_index) { + .bool_false => 0, + .bool_true => 1, .none => switch (self.tag()) { .zero, .the_only_possible_value, - => return 0, + => 0, - .one => return 1, + .one => 1, .int_u64 => { const x = self.castTag(.int_u64).?.data; if (x == 0) return 0; return @intCast(usize, std.math.log2(x) + 1); }, - .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(), - .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(), + .int_big_positive => self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(), + .int_big_negative => self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(), .decl_ref_mut, .comptime_field_ptr, @@ -1667,7 +1691,7 @@ pub const Value = struct { .variable, .eu_payload_ptr, .opt_payload_ptr, - => return target.ptrBitWidth(), + => target.ptrBitWidth(), else => { var buffer: BigIntSpace = undefined; @@ -1675,10 +1699,18 @@ pub const Value = struct { }, }, else => switch (mod.intern_pool.indexToKey(self.ip_index)) { - .int => |int| return int.big_int.bitCountTwosComp(), + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.bitCountTwosComp(), + .u64 => |x| if (x == 0) 0 else @intCast(usize, std.math.log2(x) + 1), + .i64 => { + var buffer: Value.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return big_int.bitCountTwosComp(); + }, + }, else => unreachable, }, - } + }; } /// Converts an integer or a float to a float. May result in a loss of information. 
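All of the hunks above follow one recipe: wherever a method used to reach for `int.big_int` directly, it now switches over `int.storage`, handling the inline `u64`/`i64` forms without materializing a big integer. For reference, the storage union those switches destructure looks roughly like the sketch below. The exact declaration lives in InternPool.zig and is not part of this diff, so treat the layout and the `toBigInt` helper as a reconstruction from the call sites (it assumes the `BigIntConst`/`BigIntMutable` aliases value.zig already uses):

    pub const Int = struct {
        ty: Index,
        storage: Storage,

        pub const Storage = union(enum) {
            big_int: BigIntConst,
            u64: u64,
            i64: i64,

            /// Materializes any variant as a BigIntConst, using `space` as
            /// scratch limbs for the inline u64/i64 representations.
            pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst {
                return switch (storage) {
                    .big_int => |big_int| big_int,
                    inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
                };
            }

            pub const BigIntSpace = struct {
                /// The +1 limb of headroom makes incrementing or decrementing
                /// once possible without an allocator.
                limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
            };
        };
    };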
@@ -1798,8 +1830,11 @@ pub const Value = struct { else => unreachable, }, - else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { - .int => |int| return int.big_int.orderAgainstScalar(0), + else => return switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.orderAgainstScalar(0), + inline .u64, .i64 => |x| std.math.order(x, 0), + }, else => unreachable, }, } @@ -2777,6 +2812,10 @@ pub const Value = struct { } } + pub fn tagIsVariable(val: Value) bool { + return val.ip_index == .none and val.tag() == .variable; + } + /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { return switch (val.ip_index) { @@ -5399,12 +5438,7 @@ pub const Value = struct { }; }; - /// Big enough to fit any non-BigInt value - pub const BigIntSpace = struct { - /// The +1 is headroom so that operations such as incrementing once or decrementing once - /// are possible without using an allocator. - limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb, - }; + pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; pub const zero = initTag(.zero); pub const one = initTag(.one); From 2f05b1482a6f62658cc70c252d48a24a40404aa8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 14:57:23 -0700 Subject: [PATCH 025/205] Sema: update core comptime detection logic to be InternPool aware * Add some assertions to make sure instructions are not none. I tested all these with master branch as well and made sure the behavior tests still passed with the assertions intact (along with a handful of callsite updates). * Fix Sema.resolveMaybeUndefValAllowVariablesMaybeRuntime not noticing that interned values are comptime-known. This was causing all kinds of chaos. * Fix print_air writeType calling tag() without checking for ip_index --- src/Air.zig | 16 +++++++++++----- src/Liveness.zig | 4 ++-- src/Liveness/Verify.zig | 2 +- src/Sema.zig | 33 +++++++++++++++++++-------------- src/Zir.zig | 10 ++++++++-- src/print_air.zig | 10 ++++++---- 6 files changed, 47 insertions(+), 28 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 64212d3b9af3..3c04d17073c2 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -925,7 +925,7 @@ pub const Inst = struct { /// This Ref does not correspond to any AIR instruction or constant /// value and may instead be used as a sentinel to indicate null. - none = std.math.maxInt(u32), + none = @enumToInt(InternPool.Index.none), _, }; @@ -1461,11 +1461,12 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { pub const ref_start_index: u32 = InternPool.static_len; -pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { - return @intToEnum(Air.Inst.Ref, ref_start_index + inst); +pub fn indexToRef(inst: Inst.Index) Inst.Ref { + return @intToEnum(Inst.Ref, ref_start_index + inst); } -pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { +pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { + assert(inst != .none); const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; @@ -1474,8 +1475,13 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } } +pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { + if (inst == .none) return null; + return refToIndex(inst); +} + /// Returns `null` if runtime-known. 
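The assert in `refToIndex` together with the new `refToIndexAllowNone` means a `Ref` now falls into exactly three cases, which the `value` function below has to distinguish. A minimal sketch of the classification, assuming only the declarations shown in this patch:

    fn classify(ref: Air.Inst.Ref) void {
        if (ref == .none) {
            // Sentinel: refToIndex would assert here; callers that may see
            // .none must go through refToIndexAllowNone instead.
            return;
        }
        const ref_int = @enumToInt(ref);
        if (ref_int < Air.ref_start_index) {
            // An interned constant; the same index is shared with ZIR.
            const ip_index = @intToEnum(InternPool.Index, ref_int);
            _ = ip_index;
        } else {
            // An actual AIR instruction, offset by ref_start_index.
            const inst: Air.Inst.Index = ref_int - Air.ref_start_index;
            _ = inst;
        }
    }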
-pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const Module) ?Value { +pub fn value(air: Air, inst: Inst.Ref, mod: *const Module) ?Value { const ref_int = @enumToInt(inst); if (ref_int < ref_start_index) { const ip_index = @intToEnum(InternPool.Index, ref_int); diff --git a/src/Liveness.zig b/src/Liveness.zig index 19659940af3c..da705cfab80f 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -1268,7 +1268,7 @@ fn analyzeOperands( _ = data.live_set.remove(inst); for (operands) |op_ref| { - const operand = Air.refToIndex(op_ref) orelse continue; + const operand = Air.refToIndexAllowNone(op_ref) orelse continue; // Don't compute any liveness for constants switch (inst_tags[operand]) { @@ -1304,7 +1304,7 @@ fn analyzeOperands( while (i > 0) { i -= 1; const op_ref = operands[i]; - const operand = Air.refToIndex(op_ref) orelse continue; + const operand = Air.refToIndexAllowNone(op_ref) orelse continue; // Don't compute any liveness for constants switch (inst_tags[operand]) { diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index 7059fec5074b..dbdbf321740c 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -555,7 +555,7 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err } fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void { - const operand = Air.refToIndex(op_ref) orelse return; + const operand = Air.refToIndexAllowNone(op_ref) orelse return; switch (self.air.instructions.items(.tag)[operand]) { .constant, .const_ty, .interned => {}, else => { diff --git a/src/Sema.zig b/src/Sema.zig index 37d6f3e90234..8abe6484eec1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -968,7 +968,7 @@ fn analyzeBodyInner( .int_big => try sema.zirIntBig(block, inst), .float => try sema.zirFloat(block, inst), .float128 => try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), + .int_type => try sema.zirIntType(inst), .is_non_err => try sema.zirIsNonErr(block, inst), .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), .ret_is_non_err => try sema.zirRetIsNonErr(block, inst), @@ -1694,7 +1694,7 @@ fn analyzeBodyInner( const extra = sema.code.extraData(Zir.Inst.DeferErrCode, inst_data.payload_index).data; const defer_body = sema.code.extra[extra.index..][0..extra.len]; const err_code = try sema.resolveInst(inst_data.err_code); - sema.inst_map.putAssumeCapacity(extra.remapped_err_code, err_code); + map.putAssumeCapacity(extra.remapped_err_code, err_code); const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) { error.ComptimeBreak => sema.comptime_break_inst, else => |e| return e, @@ -1730,7 +1730,16 @@ fn analyzeBodyInner( return result; } +pub fn resolveInstAllowNone(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { + if (zir_ref == .none) { + return .none; + } else { + return resolveInst(sema, zir_ref); + } +} + pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { + assert(zir_ref != .none); const i = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. // We intentionally map the same indexes to the same values between ZIR and AIR. @@ -1969,6 +1978,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( inst: Air.Inst.Ref, make_runtime: *bool, ) CompileError!?Value { + assert(inst != .none); // First section of indexes correspond to a set number of constant values. 
const int = @enumToInt(inst); if (int < InternPool.static_len) { @@ -1985,17 +1995,17 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( } return opv; } + const air_datas = sema.air_instructions.items(.data); switch (air_tags[i]) { .constant => { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; + const ty_pl = air_datas[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; if (val.tag() == .runtime_value) make_runtime.* = true; if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, - .const_ty => { - return try sema.air_instructions.items(.data)[i].ty.toValue(sema.arena); - }, + .const_ty => return try air_datas[i].ty.toValue(sema.arena), + .interned => return air_datas[i].interned.toValue(), else => return null, } } @@ -7913,15 +7923,10 @@ fn emitDbgInline( }); } -fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - _ = block; - const tracy = trace(@src()); - defer tracy.end(); - +fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const int_type = sema.code.instructions.items(.data)[inst].int_type; const ty = try mod.intType(int_type.signedness, int_type.bit_count); - return sema.addType(ty); } @@ -17509,7 +17514,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) const tracy = trace(@src()); defer tracy.end(); - const saved_index = if (Zir.refToIndex(inst_data.block)) |zir_block| b: { + const saved_index = if (Zir.refToIndexAllowNone(inst_data.block)) |zir_block| b: { var block = start_block; while (true) { if (block.label) |label| { @@ -17535,7 +17540,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere - const operand = try sema.resolveInst(inst_data.operand); + const operand = try sema.resolveInstAllowNone(inst_data.operand); return sema.popErrorReturnTrace(start_block, src, operand, saved_index); } diff --git a/src/Zir.zig b/src/Zir.zig index 7b708ab04e62..8c03dfd060f0 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2132,7 +2132,7 @@ pub const Inst = struct { /// This Ref does not correspond to any ZIR instruction or constant /// value and may instead be used as a sentinel to indicate null. 
- none = std.math.maxInt(u32), + none = @enumToInt(InternPool.Index.none), _, }; @@ -3814,13 +3814,14 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { }; } -const ref_start_index: u32 = InternPool.static_len; +pub const ref_start_index: u32 = InternPool.static_len; pub fn indexToRef(inst: Inst.Index) Inst.Ref { return @intToEnum(Inst.Ref, ref_start_index + inst); } pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { + assert(inst != .none); const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; @@ -3828,3 +3829,8 @@ pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { return null; } } + +pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { + if (inst == .none) return null; + return refToIndex(inst); +} diff --git a/src/print_air.zig b/src/print_air.zig index 8717bdc6bfe5..4396a26f4461 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -366,10 +366,12 @@ const Writer = struct { } fn writeType(w: *Writer, s: anytype, ty: Type) !void { - const t = ty.tag(); - switch (t) { - .inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"), - .inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"), + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"), + .inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"), + else => try ty.print(s, w.module), + }, else => try ty.print(s, w.module), } } From 70a4b76acaef8d4062f4d5317af398929ea6c9c4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 16:32:11 -0700 Subject: [PATCH 026/205] std.builtin.AddressSpace: allocate one more bit to this enum --- lib/std/builtin.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index ff6d20370ca4..b1f1406684b3 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -190,7 +190,7 @@ pub const CallingConvention = enum { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. 
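The one extra bit granted below is not cosmetic: the next patch packs the whole pointer description into a `packed struct(u32)`, and Zig rejects such a struct unless the field widths sum to exactly 32. With the layout introduced there, the budget works out to 2 (size) + 6 (alignment) + 3 (const/volatile/allowzero) + 5 (address_space) + 16 (vector_index) = 32 bits, which a comptime assertion can pin down (a sketch against the `Flags` declaration from the following patch):

    comptime {
        // Compile-time failure if AddressSpace outgrows its 5 bits again
        // without the packed pointer layout being rebalanced.
        std.debug.assert(@bitSizeOf(InternPool.Pointer.Flags) == 32);
    }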
-pub const AddressSpace = enum(u4) { +pub const AddressSpace = enum(u5) { generic, gs, fs, From 9ec0017f460854300004ab263bf585c2d376d1fb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 16:32:38 -0700 Subject: [PATCH 027/205] stage2: migrate many pointer types to the InternPool --- src/Air.zig | 14 +++--- src/InternPool.zig | 41 ++++++++++++----- src/Sema.zig | 42 +++++++++--------- src/arch/aarch64/CodeGen.zig | 8 ++-- src/arch/arm/CodeGen.zig | 8 ++-- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/codegen/c.zig | 12 ++--- src/codegen/llvm.zig | 10 ++--- src/codegen/spirv.zig | 13 +++--- src/type.zig | 85 ++++++++++++++++++++++++++---------- 11 files changed, 152 insertions(+), 85 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 3c04d17073c2..43fc55e81173 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1427,8 +1427,11 @@ pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const inst_index = ref_int - ref_start_index; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); - assert(air_tags[inst_index] == .const_ty); - return air_datas[inst_index].ty; + return switch (air_tags[inst_index]) { + .const_ty => air_datas[inst_index].ty, + .interned => air_datas[inst_index].interned.toType(), + else => unreachable, + }; } /// Returns the requested data, as well as the new index which is at the start of the @@ -1492,6 +1495,7 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *const Module) ?Value { switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, + .interned => return air_datas[inst_index].interned.toValue(), else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } } @@ -1717,8 +1721,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { => false, .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, - .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtr(), - .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtr(), - .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtr(), + .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), + .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), + .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), }; } diff --git a/src/InternPool.zig b/src/InternPool.zig index 1da0572bd4c4..36afbadf3d00 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -73,7 +73,7 @@ pub const Key = union(enum) { /// If zero use pointee_type.abiAlignment() /// When creating pointer types, if alignment is equal to pointee type /// abi alignment, this value should be set to 0 instead. - alignment: u16 = 0, + alignment: u64 = 0, /// If this is non-zero it means the pointer points to a sub-byte /// range of data, which is backed by a "host integer" with this /// number of bytes. @@ -90,9 +90,9 @@ pub const Key = union(enum) { /// an appropriate value for this field. 
address_space: std.builtin.AddressSpace = .generic, - pub const VectorIndex = enum(u32) { - none = std.math.maxInt(u32), - runtime = std.math.maxInt(u32) - 1, + pub const VectorIndex = enum(u16) { + none = std.math.maxInt(u16), + runtime = std.math.maxInt(u16) - 1, _, }; }; @@ -806,16 +806,33 @@ pub const Pointer = struct { sentinel: Index, flags: Flags, packed_offset: PackedOffset, - vector_index: VectorIndex, + + /// Stored as a power-of-two, with one special value to indicate none. + pub const Alignment = enum(u6) { + none = std.math.maxInt(u6), + _, + + pub fn toByteUnits(a: Alignment, default: u64) u64 { + return switch (a) { + .none => default, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn fromByteUnits(n: u64) Alignment { + if (n == 0) return .none; + return @intToEnum(Alignment, @ctz(n)); + } + }; pub const Flags = packed struct(u32) { - alignment: u16, + size: Size, + alignment: Alignment, is_const: bool, is_volatile: bool, is_allowzero: bool, - size: Size, address_space: AddressSpace, - _: u7 = undefined, + vector_index: VectorIndex, }; pub const PackedOffset = packed struct(u32) { @@ -928,13 +945,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { return .{ .ptr_type = .{ .elem_type = ptr_info.child, .sentinel = ptr_info.sentinel, - .alignment = ptr_info.flags.alignment, + .alignment = ptr_info.flags.alignment.toByteUnits(0), .size = ptr_info.flags.size, .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, - .vector_index = ptr_info.vector_index, + .vector_index = ptr_info.flags.vector_index, .host_size = ptr_info.packed_offset.host_size, .bit_offset = ptr_info.packed_offset.bit_offset, } }; @@ -1003,18 +1020,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .child = ptr_type.elem_type, .sentinel = ptr_type.sentinel, .flags = .{ - .alignment = ptr_type.alignment, + .alignment = Pointer.Alignment.fromByteUnits(ptr_type.alignment), .is_const = ptr_type.is_const, .is_volatile = ptr_type.is_volatile, .is_allowzero = ptr_type.is_allowzero, .size = ptr_type.size, .address_space = ptr_type.address_space, + .vector_index = ptr_type.vector_index, }, .packed_offset = .{ .host_size = ptr_type.host_size, .bit_offset = ptr_type.bit_offset, }, - .vector_index = ptr_type.vector_index, }), }); }, diff --git a/src/Sema.zig b/src/Sema.zig index 8abe6484eec1..39f39b43d9aa 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8400,7 +8400,7 @@ fn analyzeOptionalPayloadPtr( const child_type = opt_type.optionalChild(mod); const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = child_type, - .mutable = !optional_ptr_ty.isConstPtr(), + .mutable = !optional_ptr_ty.isConstPtr(mod), .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), }); @@ -8594,7 +8594,7 @@ fn analyzeErrUnionPayloadPtr( const payload_ty = err_union_ty.errorUnionPayload(); const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, - .mutable = !operand_ty.isConstPtr(), + .mutable = !operand_ty.isConstPtr(mod), .@"addrspace" = operand_ty.ptrAddressSpace(mod), }); @@ -10147,7 +10147,7 @@ fn zirSwitchCapture( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = operand_ptr_ty.ptrIsMutable(mod), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant( @@ 
-10166,7 +10166,7 @@ fn zirSwitchCapture( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = operand_ptr_ty.ptrIsMutable(mod), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); @@ -15292,10 +15292,10 @@ fn zirCmpEq( } // comparing null with optionals - if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr())) { + if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, rhs, op == .neq); } - if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr())) { + if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, lhs, op == .neq); } @@ -22254,7 +22254,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const target = sema.mod.getTarget(); const mod = sema.mod; - if (dest_ty.isConstPtr()) { + if (dest_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); } @@ -22452,7 +22452,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ty = sema.typeOf(dest_ptr); try checkMemOperand(sema, block, dest_src, dest_ptr_ty); - if (dest_ptr_ty.isConstPtr()) { + if (dest_ptr_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memset constant pointer", .{}); } @@ -24206,7 +24206,7 @@ fn fieldPtr( const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = slice_ptr_ty, .mutable = attr_ptr_ty.ptrIsMutable(mod), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); @@ -24227,7 +24227,7 @@ fn fieldPtr( const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = Type.usize, .mutable = attr_ptr_ty.ptrIsMutable(mod), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); @@ -24897,7 +24897,7 @@ fn unionFieldPtr( const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ .pointee_type = field.ty, .mutable = union_ptr_ty.ptrIsMutable(mod), - .@"volatile" = union_ptr_ty.isVolatilePtr(), + .@"volatile" = union_ptr_ty.isVolatilePtr(mod), .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod), }); const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); @@ -25239,7 +25239,7 @@ fn tupleFieldPtr( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = tuple_ptr_ty.ptrIsMutable(mod), - .@"volatile" = tuple_ptr_ty.isVolatilePtr(), + .@"volatile" = tuple_ptr_ty.isVolatilePtr(mod), .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); @@ -25767,7 +25767,7 @@ fn coerceExtra( } // coercion from C pointer - if (inst_ty.isCPtr()) src_c_ptr: { + if (inst_ty.isCPtr(mod)) src_c_ptr: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr; // In this case we must add a safety check because the C pointer // could be null. 
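Beyond threading `mod` through these pointer queries, the interesting representational change in this patch is `Pointer.Alignment`: byte alignments are now stored as a log2 `u6`, with `.none` standing in for "use the pointee type's ABI alignment". A small sanity sketch of the round-trip, written as a test against the declarations shown above:

    test "Pointer.Alignment log2 round-trip" {
        const Alignment = InternPool.Pointer.Alignment;
        // A concrete alignment survives the encode/decode cycle.
        try std.testing.expectEqual(@as(u64, 8), Alignment.fromByteUnits(8).toByteUnits(0));
        // Zero byte units means "unspecified" and maps to the .none sentinel...
        try std.testing.expectEqual(Alignment.none, Alignment.fromByteUnits(0));
        // ...which decodes back to whatever default the caller supplies.
        try std.testing.expectEqual(@as(u64, 16), Alignment.fromByteUnits(0).toByteUnits(16));
    }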
@@ -27255,7 +27255,7 @@ fn storePtr2( ) CompileError!void { const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - if (ptr_ty.isConstPtr()) + if (ptr_ty.isConstPtr(mod)) return sema.fail(block, ptr_src, "cannot assign to constant", .{}); const elem_ty = ptr_ty.childType(mod); @@ -29843,7 +29843,7 @@ fn analyzeSlice( const result = try block.addBitCast(return_ty, new_ptr); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } @@ -29902,7 +29902,7 @@ fn analyzeSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } @@ -30720,7 +30720,7 @@ fn resolvePeerTypes( err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); } } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); chosen = candidate; chosen_i = candidate_i + 1; continue; @@ -30876,12 +30876,12 @@ fn resolvePeerTypes( .Optional => { const opt_child_ty = candidate_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { - seen_const = seen_const or opt_child_ty.isConstPtr(); + seen_const = seen_const or opt_child_ty.isConstPtr(mod); any_are_null = true; continue; } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); any_are_null = false; chosen = candidate; chosen_i = candidate_i + 1; @@ -30924,7 +30924,7 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr(mod) and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { continue; } @@ -31023,7 +31023,7 @@ fn resolvePeerTypes( var info = chosen_ty.ptrInfo(mod); info.sentinel = chosen_child_ty.sentinel(mod); info.size = .Slice; - info.mutable = !(seen_const or chosen_child_ty.isConstPtr()); + info.mutable = !(seen_const or chosen_child_ty.isConstPtr(mod)); info.pointee_type = chosen_child_ty.elemType2(mod); const new_ptr_ty = try Type.ptr(sema.arena, mod, info); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 81169750c1a1..4a10691e02f9 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3430,9 +3430,10 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -3496,9 +3497,10 @@ fn 
airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -3869,7 +3871,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index c08cb58c48ea..3591ead53d4b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2428,9 +2428,10 @@ fn ptrElemVal( } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -2527,9 +2528,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -2738,7 +2740,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1e5858a948f9..1008d527f6a1 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1536,7 +1536,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f8a62f9798b6..83e4b4f93d28 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1827,7 +1827,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result 
MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index cd3974bc91d8..b0fb9fa480f2 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6117,7 +6117,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6159,7 +6159,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6221,7 +6221,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { if (use_atomic) try writer.writeAll("zig_atomic("); try f.renderType(writer, ty); if (use_atomic) try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6265,7 +6265,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", (zig_atomic("); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6299,7 +6299,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try writer.writeAll("zig_atomic_store((zig_atomic("); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6365,7 +6365,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { return .none; } - if (elem_abi_size > 1 or dest_ty.isVolatilePtr()) { + if (elem_abi_size > 1 or dest_ty.isVolatilePtr(mod)) { // For the assignment in this loop, the array pointer needs to get // casted to a regular pointer, otherwise an error like this occurs: // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 558534a651ee..7fa9b7433487 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -7046,7 +7046,7 @@ pub const FuncGen = struct { const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod)); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); - load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); + 
load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod))); break :blk load_inst; }; const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, ""); @@ -8221,7 +8221,7 @@ pub const FuncGen = struct { const usize_llvm_ty = try self.dg.lowerType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); const dest_ptr_align = ptr_ty.ptrAlignment(mod); - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr()); + _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); if (safety and mod.comp.bin_file.options.valgrind) { self.valgrindMarkUndef(dest_ptr, len); } @@ -8497,7 +8497,7 @@ pub const FuncGen = struct { const dest_ptr_align = ptr_ty.ptrAlignment(mod); const u8_llvm_ty = self.context.intType(8); const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); - const is_volatile = ptr_ty.isVolatilePtr(); + const is_volatile = ptr_ty.isVolatilePtr(mod); if (self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { @@ -8621,7 +8621,7 @@ pub const FuncGen = struct { const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); const mod = self.dg.module; - const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr(); + const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod); _ = self.builder.buildMemCpy( dest_ptr, dest_ptr_ty.ptrAlignment(mod), @@ -9894,7 +9894,7 @@ pub const FuncGen = struct { if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_alignment = info.alignment(mod); - const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr()); + const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr(mod)); assert(info.vector_index != .runtime); if (info.vector_index != .none) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 5fa81d19ff54..27a79c1c4518 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1689,7 +1689,7 @@ pub const DeclGen = struct { const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ - .Volatile = ptr_ty.isVolatilePtr(), + .Volatile = ptr_ty.isVolatilePtr(mod), }; try self.func.body.emit(self.spv.gpa, .OpLoad, .{ .id_result_type = self.typeId(indirect_value_ty_ref), @@ -1705,7 +1705,7 @@ pub const DeclGen = struct { const value_ty = ptr_ty.childType(mod); const indirect_value_id = try self.convertToIndirect(value_ty, value_id); const access = spec.MemoryAccess.Extended{ - .Volatile = ptr_ty.isVolatilePtr(), + .Volatile = ptr_ty.isVolatilePtr(mod), }; try self.func.body.emit(self.spv.gpa, .OpStore, .{ .pointer = ptr_id, @@ -2464,9 +2464,10 @@ pub const DeclGen = struct { } fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2479,9 +2480,10 @@ pub const DeclGen = struct { } fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = 
self.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2781,10 +2783,11 @@ pub const DeclGen = struct { } fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ptr_ty = self.typeOf(ty_op.operand); const operand = try self.resolve(ty_op.operand); - if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; return try self.load(ptr_ty, operand); } diff --git a/src/type.zig b/src/type.zig index 4840bca6e7f6..db8c116f70a1 100644 --- a/src/type.zig +++ b/src/type.zig @@ -193,7 +193,7 @@ pub const Type = struct { .Frame, => false, - .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()), + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), .Optional => { if (!is_equality_cmp) return false; return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); @@ -3012,38 +3012,59 @@ pub const Type = struct { } } - pub fn isConstPtr(self: Type) bool { - return switch (self.tag()) { - .pointer => !self.castTag(.pointer).?.data.mutable, - else => false, + pub fn isConstPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => !ty.castTag(.pointer).?.data.mutable, + else => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_const, + else => false, + }, }; } - pub fn isVolatilePtr(self: Type) bool { - return switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - return payload.@"volatile"; + pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { + return isVolatilePtrIp(ty, mod.intern_pool); + } + + pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.@"volatile", + else => false, + }, + else => switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_volatile, + else => false, }, - else => false, }; } - pub fn isAllowzeroPtr(self: Type, mod: *const Module) bool { - return switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - return payload.@"allowzero"; + pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.@"allowzero", + else => ty.zigTypeTag(mod) == .Optional, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_allowzero, + else => false, }, - else => return self.zigTypeTag(mod) == .Optional, }; } - pub fn isCPtr(self: Type) bool { - return switch (self.tag()) { - .pointer => self.castTag(.pointer).?.data.size == .C, - - else => return false, + pub fn isCPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.size == .C, + else => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.size == .C, + else => false, + }, }; } @@ -5063,7 +5084,7 @@ pub const Type = struct { return .{ .pointee_type = p.elem_type.toType(), .sentinel = if 
(p.sentinel != .none) p.sentinel.toValue() else null,
-            .@"align" = p.alignment,
+            .@"align" = @intCast(u32, p.alignment),
             .@"addrspace" = p.address_space,
             .bit_offset = p.bit_offset,
             .host_size = p.host_size,
@@ -5248,6 +5269,24 @@ pub const Type = struct {
             }
         }
 
+        if (d.pointee_type.ip_index != .none and
+            (d.sentinel == null or d.sentinel.?.ip_index != .none))
+        {
+            return mod.ptrType(.{
+                .elem_type = d.pointee_type.ip_index,
+                .sentinel = if (d.sentinel) |s| s.ip_index else .none,
+                .alignment = d.@"align",
+                .host_size = d.host_size,
+                .bit_offset = d.bit_offset,
+                .vector_index = d.vector_index,
+                .size = d.size,
+                .is_const = !d.mutable,
+                .is_volatile = d.@"volatile",
+                .is_allowzero = d.@"allowzero",
+                .address_space = d.@"addrspace",
+            });
+        }
+
         return Type.Tag.pointer.create(arena, d);
     }
 
From 0471638734257d479d21abcb490ed9459df42b9b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 5 May 2023 18:07:25 -0700
Subject: [PATCH 028/205] InternPool: add a dump function

So we can see stats

---
 src/Compilation.zig |   5 ++
 src/InternPool.zig  | 155 ++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 148 insertions(+), 12 deletions(-)

diff --git a/src/Compilation.zig b/src/Compilation.zig
index 15e393c35ce4..9b2128a590d8 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -2026,6 +2026,11 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
     try comp.performAllTheWork(main_progress_node);
 
     if (comp.bin_file.options.module) |module| {
+        std.debug.print("intern pool stats for '{s}':\n", .{
+            comp.bin_file.options.root_name,
+        });
+        module.intern_pool.dump();
+
         if (comp.bin_file.options.is_test and comp.totalErrorCount() == 0) {
             // The `test_functions` decl has been intentionally postponed until now,
             // at which point we must populate it with the list of test functions that
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 36afbadf3d00..c2d04067162f 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -722,10 +722,10 @@ pub const Tag = enum(u8) {
     /// data is float value bitcasted to u32.
     float_f32,
     /// An f64 value.
-    /// data is payload index to Float64.
+    /// data is extra index to Float64.
     float_f64,
     /// An f128 value.
-    /// data is payload index to Float128.
+    /// data is extra index to Float128.
     float_f128,
     /// An extern function.
     extern_func,
@@ -877,6 +877,33 @@ pub const Int = struct {
     limbs_len: u32,
 };
 
+/// An f64 value, broken up into 2 u32 parts.
+pub const Float64 = struct {
+    piece0: u32,
+    piece1: u32,
+
+    pub fn get(self: Float64) f64 {
+        const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32);
+        return @bitCast(f64, int_bits);
+    }
+};
+
+/// An f128 value, broken up into 4 u32 parts.
+pub const Float128 = struct {
+    piece0: u32,
+    piece1: u32,
+    piece2: u32,
+    piece3: u32,
+
+    pub fn get(self: Float128) f128 {
+        const int_bits = @as(u128, self.piece0) |
+            (@as(u128, self.piece1) << 32) |
+            (@as(u128, self.piece2) << 64) |
+            (@as(u128, self.piece3) << 96);
+        return @bitCast(f128, int_bits);
+    }
+};
+
 pub fn init(ip: *InternPool, gpa: Allocator) !void {
     assert(ip.items.len == 0);
 
@@ -1293,20 +1320,42 @@ fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const usize) void {
 }
 
 fn extraData(ip: InternPool, comptime T: type, index: usize) T {
-    const fields = std.meta.fields(T);
-    var i: usize = index;
     var result: T = undefined;
-    inline for (fields) |field| {
+    inline for (@typeInfo(T).Struct.fields, 0..)
|field, i| { + const int32 = ip.extra.items[i + index]; @field(result, field.name) = switch (field.type) { - u32 => ip.extra.items[i], - Index => @intToEnum(Index, ip.extra.items[i]), - i32 => @bitCast(i32, ip.extra.items[i]), - Pointer.Flags => @bitCast(Pointer.Flags, ip.extra.items[i]), - Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, ip.extra.items[i]), - Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, ip.extra.items[i]), + u32 => int32, + Index => @intToEnum(Index, int32), + i32 => @bitCast(i32, int32), + Pointer.Flags => @bitCast(Pointer.Flags, int32), + Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), + Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + } + return result; +} + +/// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. +fn limbData(ip: InternPool, comptime T: type, index: usize) T { + switch (@sizeOf(usize)) { + @sizeOf(u32) => return extraData(ip, T, index), + @sizeOf(u64) => {}, + else => @compileError("unsupported host"), + } + var result: T = undefined; + inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { + const host_int = ip.limbs.items[index + i / 2]; + const int32 = if (i % 2 == 0) + @truncate(u32, host_int) + else + @truncate(u32, host_int >> 32); + + @field(result, field.name) = switch (field.type) { + u32 => int32, + Index => @intToEnum(Index, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), }; - i += 1; } return result; } @@ -1350,3 +1399,85 @@ pub fn childType(ip: InternPool, i: Index) Index { else => unreachable, }; } + +pub fn dump(ip: InternPool) void { + dumpFallible(ip, std.heap.page_allocator) catch return; +} + +fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { + const items_size = (1 + 4) * ip.items.len; + const extra_size = 4 * ip.extra.items.len; + const limbs_size = 8 * ip.limbs.items.len; + + // TODO: map overhead size is not taken into account + const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size; + + std.debug.print( + \\InternPool size: {d} bytes + \\ items: {d} bytes + \\ extra: {d} bytes + \\ limbs: {d} bytes + \\ + , .{ total_size, items_size, extra_size, limbs_size }); + + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + const TagStats = struct { + count: usize = 0, + bytes: usize = 0, + }; + var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); + for (tags, datas) |tag, data| { + const gop = try counts.getOrPut(tag); + if (!gop.found_existing) gop.value_ptr.* = .{}; + gop.value_ptr.count += 1; + gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { + .type_int_signed => 0, + .type_int_unsigned => 0, + .type_array => @sizeOf(Vector), + .type_vector => @sizeOf(Vector), + .type_pointer => @sizeOf(Pointer), + .type_optional => 0, + .type_error_union => @sizeOf(ErrorUnion), + .type_enum_simple => @sizeOf(EnumSimple), + .simple_type => 0, + .simple_value => 0, + .simple_internal => 0, + .int_u32 => 0, + .int_i32 => 0, + .int_usize => 0, + .int_comptime_int_u32 => 0, + .int_comptime_int_i32 => 0, + + .int_positive, + .int_negative, + .enum_tag_positive, + .enum_tag_negative, + => b: { + const int = ip.limbData(Int, data); + break :b @sizeOf(Int) + int.limbs_len * 8; + }, + + .float_f32 => 0, + .float_f64 => @sizeOf(Float64), + .float_f128 => @sizeOf(Float128), + .extern_func => @panic("TODO"), + .func => @panic("TODO"), + }); + } + const SortContext = struct { + map: 
*std.AutoArrayHashMap(Tag, TagStats), + pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { + const values = ctx.map.values(); + return values[a_index].bytes > values[b_index].bytes; + } + }; + counts.sort(SortContext{ .map = &counts }); + const len = @min(50, tags.len); + std.debug.print("top 50 tags:\n", .{}); + for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| { + std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ + @tagName(tag), stats.count, stats.bytes, + }); + } +} From ce3cffbd5a00a6ae44ec3f2c6550de1c52c293c4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 18:07:43 -0700 Subject: [PATCH 029/205] fill out more InternPool Type methods particularly, printing types --- src/Sema.zig | 32 +++++-- src/print_air.zig | 2 +- src/type.zig | 208 +++++++++++++++++++++++++++++++--------------- src/value.zig | 64 ++++++++------ 4 files changed, 206 insertions(+), 100 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 39f39b43d9aa..088d830280aa 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -31485,11 +31485,18 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), + .ptr_type => |ptr_type| { + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { + return child_ty.fnInfo().is_generic; + } else { + return sema.resolveTypeRequiresComptime(child_ty); + } + }, + .array_type => |array_type| return sema.resolveTypeRequiresComptime(array_type.child.toType()), .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), + .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), + .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), .simple_type => |t| switch (t) { .f16, .f32, @@ -33528,11 +33535,20 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { if (ty.ip_index != .none) { switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return false, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), + .ptr_type => |ptr_type| { + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { + return child_ty.fnInfo().is_generic; + } else { + return sema.typeRequiresComptime(child_ty); + } + }, + .array_type => |array_type| return sema.typeRequiresComptime(array_type.child.toType()), .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()), - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), + .opt_type => |child| return sema.typeRequiresComptime(child.toType()), + .error_union_type => |error_union_type| { + return sema.typeRequiresComptime(error_union_type.payload_type.toType()); + }, .simple_type => |t| return switch (t) { .f16, .f32, diff --git a/src/print_air.zig b/src/print_air.zig index 4396a26f4461..a1f277870e71 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -95,7 +95,7 @@ const Writer = struct { for (w.air.instructions.items(.tag), 0..) 
|tag, i| { const inst = @intCast(Air.Inst.Index, i); switch (tag) { - .constant, .const_ty => { + .constant, .const_ty, .interned => { try w.writeInst(s, inst); try s.writeByte('\n'); }, diff --git a/src/type.zig b/src/type.zig index db8c116f70a1..cbc0b5bceabf 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1274,11 +1274,77 @@ pub const Type = struct { }; return writer.print("{c}{d}", .{ sign_char, int_type.bits }); }, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), + .ptr_type => { + const info = ty.ptrInfo(mod); + + if (info.sentinel) |s| switch (info.size) { + .One, .C => unreachable, + .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), + .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), + } else switch (info.size) { + .One => try writer.writeAll("*"), + .Many => try writer.writeAll("[*]"), + .C => try writer.writeAll("[*c]"), + .Slice => try writer.writeAll("[]"), + } + if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { + if (info.@"align" != 0) { + try writer.print("align({d}", .{info.@"align"}); + } else { + const alignment = info.pointee_type.abiAlignment(mod); + try writer.print("align({d}", .{alignment}); + } + + if (info.bit_offset != 0 or info.host_size != 0) { + try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); + } + if (info.vector_index == .runtime) { + try writer.writeAll(":?"); + } else if (info.vector_index != .none) { + try writer.print(":{d}", .{@enumToInt(info.vector_index)}); + } + try writer.writeAll(") "); + } + if (info.@"addrspace" != .generic) { + try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); + } + if (!info.mutable) try writer.writeAll("const "); + if (info.@"volatile") try writer.writeAll("volatile "); + if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); + + try print(info.pointee_type, writer, mod); + return; + }, + .array_type => |array_type| { + if (array_type.sentinel == .none) { + try writer.print("[{d}]", .{array_type.len}); + try print(array_type.child.toType(), writer, mod); + } else { + try writer.print("[{d}:{}]", .{ + array_type.len, + array_type.sentinel.toValue().fmtValue(array_type.child.toType(), mod), + }); + try print(array_type.child.toType(), writer, mod); + } + return; + }, + .vector_type => |vector_type| { + try writer.print("@Vector({d}, ", .{vector_type.len}); + try print(vector_type.child.toType(), writer, mod); + try writer.writeAll(")"); + return; + }, + .opt_type => |child| { + try writer.writeByte('?'); + try print(child.toType(), writer, mod); + return; + }, + .error_union_type => |error_union_type| { + try print(error_union_type.error_set_type.toType(), writer, mod); + try writer.writeByte('!'); + try print(error_union_type.payload_type.toType(), writer, mod); + return; + }, .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -2055,8 +2121,8 @@ pub const Type = struct { return AbiAlignmentAdvanced{ .scalar = alignment }; }, - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), + .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), + .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), .simple_type => |t| switch (t) { .bool, .atomic_order, @@ -2157,64 +2223,8 @@ pub const Type = struct { .array, .array_sentinel => return 
ty.childType(mod).abiAlignmentAdvanced(mod, strat), - .optional => { - const child_type = ty.optionalChild(mod); - - switch (child_type.zigTypeTag(mod)) { - .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), - .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, - else => {}, - } - - switch (strat) { - .eager, .sema => { - if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, - else => |e| return e, - })) { - return AbiAlignmentAdvanced{ .scalar = 1 }; - } - return child_type.abiAlignmentAdvanced(mod, strat); - }, - .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, - .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, - } - }, - - .error_union => { - // This code needs to be kept in sync with the equivalent switch prong - // in abiSizeAdvanced. - const data = ty.castTag(.error_union).?.data; - const code_align = abiAlignment(Type.anyerror, mod); - switch (strat) { - .eager, .sema => { - if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, - else => |e| return e, - })) { - return AbiAlignmentAdvanced{ .scalar = code_align }; - } - return AbiAlignmentAdvanced{ .scalar = @max( - code_align, - (try data.payload.abiAlignmentAdvanced(mod, strat)).scalar, - ) }; - }, - .lazy => |arena| { - switch (try data.payload.abiAlignmentAdvanced(mod, strat)) { - .scalar => |payload_align| { - return AbiAlignmentAdvanced{ - .scalar = @max(code_align, payload_align), - }; - }, - .val => {}, - } - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - }, - } - }, + .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), + .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -2321,6 +2331,74 @@ pub const Type = struct { } } + fn abiAlignmentAdvancedErrorUnion( + ty: Type, + mod: *const Module, + strat: AbiAlignmentAdvancedStrat, + ) Module.CompileError!AbiAlignmentAdvanced { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. 
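// Sketch (annotation, not in the patch) of the rule this moved helper
// implements: the ABI alignment of an error union `E!T` is
// @max(abiAlignment(anyerror), abiAlignment(T)); when `T` has no runtime
// bits only the error-code alignment counts, and under the `.lazy`
// strategy an unresolved payload alignment is deferred via a
// `lazy_align` value.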
+ const data = ty.castTag(.error_union).?.data; + const code_align = abiAlignment(Type.anyerror, mod); + switch (strat) { + .eager, .sema => { + if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + else => |e| return e, + })) { + return AbiAlignmentAdvanced{ .scalar = code_align }; + } + return AbiAlignmentAdvanced{ .scalar = @max( + code_align, + (try data.payload.abiAlignmentAdvanced(mod, strat)).scalar, + ) }; + }, + .lazy => |arena| { + switch (try data.payload.abiAlignmentAdvanced(mod, strat)) { + .scalar => |payload_align| { + return AbiAlignmentAdvanced{ + .scalar = @max(code_align, payload_align), + }; + }, + .val => {}, + } + return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; + }, + } + } + + fn abiAlignmentAdvancedOptional( + ty: Type, + mod: *const Module, + strat: AbiAlignmentAdvancedStrat, + ) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + const child_type = ty.optionalChild(mod); + + switch (child_type.zigTypeTag(mod)) { + .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), + .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, + else => {}, + } + + switch (strat) { + .eager, .sema => { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + else => |e| return e, + })) { + return AbiAlignmentAdvanced{ .scalar = 1 }; + } + return child_type.abiAlignmentAdvanced(mod, strat); + }, + .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, + .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }, + } + } + pub fn abiAlignmentAdvancedUnion( ty: Type, mod: *const Module, diff --git a/src/value.zig b/src/value.zig index 588218f6703c..a34a022deabe 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1848,8 +1848,6 @@ pub const Value = struct { /// Asserts the value is comparable. /// If opt_sema is null then this function asserts things are resolved and cannot fail. pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *const Module, opt_sema: ?*Sema) !std.math.Order { - const lhs_tag = lhs.tag(); - const rhs_tag = rhs.tag(); const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); switch (lhs_against_zero) { @@ -1866,6 +1864,8 @@ pub const Value = struct { const lhs_float = lhs.isFloat(); const rhs_float = rhs.isFloat(); if (lhs_float and rhs_float) { + const lhs_tag = lhs.tag(); + const rhs_tag = rhs.tag(); if (lhs_tag == rhs_tag) { return switch (lhs.tag()) { .float_16 => return std.math.order(lhs.castTag(.float_16).?.data, rhs.castTag(.float_16).?.data), @@ -2601,12 +2601,15 @@ pub const Value = struct { /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), /// this function returns null. 
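// Illustrative examples (annotation, not in the patch) for the doc
// comment above: a value built from `&some_decl` (tag .decl_ref) yields
// that Decl.Index, whereas a pointer into part of a decl (a .field_ptr
// or .elem_ptr payload) yields null, as does any InternPool-backed
// value, since the new `else => null` arm covers every non-.none
// ip_index.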
pub fn pointerDecl(val: Value) ?Module.Decl.Index { - return switch (val.tag()) { - .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index, - .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, - .function => val.castTag(.function).?.data.owner_decl, - .variable => val.castTag(.variable).?.data.owner_decl, - .decl_ref => val.cast(Payload.Decl).?.data, + return switch (val.ip_index) { + .none => switch (val.tag()) { + .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index, + .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, + .function => val.castTag(.function).?.data.owner_decl, + .variable => val.castTag(.variable).?.data.owner_decl, + .decl_ref => val.cast(Payload.Decl).?.data, + else => null, + }, else => null, }; } @@ -3831,35 +3834,44 @@ pub const Value = struct { /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. pub fn isNan(val: Value) bool { - return switch (val.tag()) { - .float_16 => std.math.isNan(val.castTag(.float_16).?.data), - .float_32 => std.math.isNan(val.castTag(.float_32).?.data), - .float_64 => std.math.isNan(val.castTag(.float_64).?.data), - .float_80 => std.math.isNan(val.castTag(.float_80).?.data), - .float_128 => std.math.isNan(val.castTag(.float_128).?.data), + return switch (val.ip_index) { + .none => switch (val.tag()) { + .float_16 => std.math.isNan(val.castTag(.float_16).?.data), + .float_32 => std.math.isNan(val.castTag(.float_32).?.data), + .float_64 => std.math.isNan(val.castTag(.float_64).?.data), + .float_80 => std.math.isNan(val.castTag(.float_80).?.data), + .float_128 => std.math.isNan(val.castTag(.float_128).?.data), + else => false, + }, else => false, }; } /// Returns true if the value is a floating point type and is infinite. Returns false otherwise. 
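// Note (annotation, not in the patch): the same ip_index dispatch is
// applied to isNan above and to isInf/isNegativeInf below; any
// InternPool-backed value reports false, presumably because float values
// are still stored as legacy tagged payloads at this point in the
// transition.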
pub fn isInf(val: Value) bool { - return switch (val.tag()) { - .float_16 => std.math.isInf(val.castTag(.float_16).?.data), - .float_32 => std.math.isInf(val.castTag(.float_32).?.data), - .float_64 => std.math.isInf(val.castTag(.float_64).?.data), - .float_80 => std.math.isInf(val.castTag(.float_80).?.data), - .float_128 => std.math.isInf(val.castTag(.float_128).?.data), + return switch (val.ip_index) { + .none => switch (val.tag()) { + .float_16 => std.math.isInf(val.castTag(.float_16).?.data), + .float_32 => std.math.isInf(val.castTag(.float_32).?.data), + .float_64 => std.math.isInf(val.castTag(.float_64).?.data), + .float_80 => std.math.isInf(val.castTag(.float_80).?.data), + .float_128 => std.math.isInf(val.castTag(.float_128).?.data), + else => false, + }, else => false, }; } pub fn isNegativeInf(val: Value) bool { - return switch (val.tag()) { - .float_16 => std.math.isNegativeInf(val.castTag(.float_16).?.data), - .float_32 => std.math.isNegativeInf(val.castTag(.float_32).?.data), - .float_64 => std.math.isNegativeInf(val.castTag(.float_64).?.data), - .float_80 => std.math.isNegativeInf(val.castTag(.float_80).?.data), - .float_128 => std.math.isNegativeInf(val.castTag(.float_128).?.data), + return switch (val.ip_index) { + .none => switch (val.tag()) { + .float_16 => std.math.isNegativeInf(val.castTag(.float_16).?.data), + .float_32 => std.math.isNegativeInf(val.castTag(.float_32).?.data), + .float_64 => std.math.isNegativeInf(val.castTag(.float_64).?.data), + .float_80 => std.math.isNegativeInf(val.castTag(.float_80).?.data), + .float_128 => std.math.isNegativeInf(val.castTag(.float_128).?.data), + else => false, + }, else => false, }; } From 3e6dd0da7a9aae1e6e3d3e0d897a2b3a28bfc043 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 18:34:05 -0700 Subject: [PATCH 030/205] stage2: add tmp_hack_arena for the InternPool transition Temporarily used for some unfortunate allocations made by backends that need to construct pointer types that can't be represented by the InternPool. Once all types are migrated to be stored in the InternPool, this can be removed. --- src/Compilation.zig | 1 + src/InternPool.zig | 1 + src/Module.zig | 20 ++++++++++++++++++++ 3 files changed, 22 insertions(+) diff --git a/src/Compilation.zig b/src/Compilation.zig index 9b2128a590d8..75af9362f69e 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1316,6 +1316,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .local_zir_cache = local_zir_cache, .emit_h = emit_h, .error_name_list = .{}, + .tmp_hack_arena = std.heap.ArenaAllocator.init(gpa), }; try module.init(); diff --git a/src/InternPool.zig b/src/InternPool.zig index c2d04067162f..de69b19dbe2f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1040,6 +1040,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, .ptr_type => |ptr_type| { + assert(ptr_type.elem_type != .none); // TODO introduce more pointer encodings ip.items.appendAssumeCapacity(.{ .tag = .type_pointer, diff --git a/src/Module.zig b/src/Module.zig index bfc06ac5abec..e9658ad89f41 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -98,6 +98,10 @@ string_literal_bytes: ArrayListUnmanaged(u8) = .{}, /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, +/// Temporarily used for some unfortunate allocations made by backends that need to construct +/// pointer types that can't be represented by the InternPool. 
Once all types are migrated +/// to be stored in the InternPool, this can be removed. +tmp_hack_arena: std.heap.ArenaAllocator, /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch @@ -3552,6 +3556,7 @@ pub fn deinit(mod: *Module) void { mod.string_literal_bytes.deinit(gpa); mod.intern_pool.deinit(gpa); + mod.tmp_hack_arena.deinit(); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { @@ -6848,10 +6853,25 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type } pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + if (child_type.ip_index == .none) { + // TODO remove this after all types can be represented via the InternPool + return Type.Tag.pointer.create(mod.tmp_hack_arena.allocator(), .{ + .pointee_type = child_type, + .@"addrspace" = .generic, + }); + } return ptrType(mod, .{ .elem_type = child_type.ip_index }); } pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + if (child_type.ip_index == .none) { + // TODO remove this after all types can be represented via the InternPool + return Type.Tag.pointer.create(mod.tmp_hack_arena.allocator(), .{ + .pointee_type = child_type, + .mutable = false, + .@"addrspace" = .generic, + }); + } return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } From 80bf5af3458b25ae7375b548dc7f42150fbef3c8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 18:34:52 -0700 Subject: [PATCH 031/205] fix AIR printing of interned constants --- src/TypedValue.zig | 8 ++++++-- src/print_air.zig | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 0efd39637390..d4ee7661d559 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -413,8 +413,12 @@ pub fn print( .runtime_value => return writer.writeAll("[runtime value]"), }, else => { - try writer.print("(interned: {})", .{val.ip_index}); - return; + const key = mod.intern_pool.indexToKey(val.ip_index); + if (key.typeOf() == .type_type) { + return Type.print(val.toType(), writer, mod); + } else { + return writer.print("{}", .{val.ip_index}); + } }, }; } diff --git a/src/print_air.zig b/src/print_air.zig index a1f277870e71..8cff41777094 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -621,7 +621,7 @@ const Writer = struct { fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const mod = w.module; const ip_index = w.air.instructions.items(.data)[inst].interned; - const ty = ip_index.toType(); + const ty = mod.intern_pool.indexToKey(ip_index).typeOf().toType(); try w.writeType(s, ty); try s.print(", {}", .{ip_index.toValue().fmtValue(ty, mod)}); } From 08e97639513f09e2797bd7afcdfdfecdad6c6fd8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 18:35:08 -0700 Subject: [PATCH 032/205] stage2: add missing comptimeOnly logic for InternPool --- src/type.zig | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/type.zig b/src/type.zig index cbc0b5bceabf..a51ae273c1a3 100644 --- a/src/type.zig +++ b/src/type.zig @@ -4076,11 +4076,18 @@ pub const Type = struct { pub fn comptimeOnly(ty: Type, mod: *const Module) bool { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, - .ptr_type => @panic("TODO"), - .array_type => |array_type| return 
array_type.child.toType().comptimeOnly(mod), - .vector_type => |vector_type| return vector_type.child.toType().comptimeOnly(mod), - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), + .ptr_type => |ptr_type| { + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { + return false; + } else { + return child_ty.comptimeOnly(mod); + } + }, + .array_type => |array_type| array_type.child.toType().comptimeOnly(mod), + .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), + .opt_type => |child| child.toType().comptimeOnly(mod), + .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod), .simple_type => |t| switch (t) { .f16, .f32, From 31aee50c1a96b7e72b42ee885636b27fbcac8eb4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 19:13:43 -0700 Subject: [PATCH 033/205] InternPool: add a slice encoding This uses the data field to reference its pointer field type, which allows for efficient and infallible access of a slice type's pointer type. --- src/InternPool.zig | 38 +++++++++++++++++++++ src/Module.zig | 2 +- src/Sema.zig | 11 +++--- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/x86_64/CodeGen.zig | 11 +++--- src/codegen.zig | 6 ++-- src/codegen/c.zig | 13 +++---- src/codegen/c/type.zig | 2 +- src/codegen/llvm.zig | 14 ++++---- src/codegen/spirv.zig | 6 ++-- src/link/Dwarf.zig | 2 +- src/type.zig | 66 ++++++++++++++++++++++-------------- src/value.zig | 8 ++--- 15 files changed, 120 insertions(+), 65 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index de69b19dbe2f..15b6e318edbc 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -668,6 +668,9 @@ pub const Tag = enum(u8) { /// A fully explicitly specified pointer type. /// data is payload to Pointer. type_pointer, + /// A slice type. + /// data is Index of underlying pointer type. + type_slice, /// An optional type. /// data is the child type. type_optional, @@ -984,6 +987,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }; }, + .type_slice => { + const ptr_ty_index = @intToEnum(Index, data); + var result = indexToKey(ip, ptr_ty_index); + result.ptr_type.size = .Slice; + return result; + }, + .type_optional => .{ .opt_type = @intToEnum(Index, data) }, .type_error_union => @panic("TODO"), @@ -1041,6 +1051,19 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .ptr_type => |ptr_type| { assert(ptr_type.elem_type != .none); + + if (ptr_type.size == .Slice) { + var new_key = key; + new_key.ptr_type.size = .Many; + const ptr_ty_index = try get(ip, gpa, new_key); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = .type_slice, + .data = @enumToInt(ptr_ty_index), + }); + return @intToEnum(Index, ip.items.len - 1); + } + // TODO introduce more pointer encodings ip.items.appendAssumeCapacity(.{ .tag = .type_pointer, @@ -1401,6 +1424,20 @@ pub fn childType(ip: InternPool, i: Index) Index { }; } +/// Given a slice type, returns the type of the pointer field. 
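// Sketch (annotation, not in the patch) of how the new slice encoding
// round-trips, using only functions from this commit; the Key.PtrType
// field names follow the Module.ptrType call sites and are assumptions:
//
//   const slice_ty = try ip.get(gpa, .{ .ptr_type = .{
//       .elem_type = .u8_type,
//       .size = .Slice,
//       .is_const = true,
//   } });
//   // get() interns the matching many-pointer first, then appends a
//   // .type_slice item whose data is that pointer type's Index, so:
//   const many_ptr = ip.slicePtrType(slice_ty); // infallible data read
//   assert(ip.indexToKey(many_ptr).ptr_type.size == .Many);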
+pub fn slicePtrType(ip: InternPool, i: Index) Index { + switch (i) { + .const_slice_u8_type => return .manyptr_const_u8_type, + .const_slice_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, + else => {}, + } + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .type_slice => return @intToEnum(Index, item.data), + else => unreachable, // not a slice type + } +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } @@ -1438,6 +1475,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_array => @sizeOf(Vector), .type_vector => @sizeOf(Vector), .type_pointer => @sizeOf(Pointer), + .type_slice => 0, .type_optional => 0, .type_error_union => @sizeOf(ErrorUnion), .type_enum_simple => @sizeOf(EnumSimple), diff --git a/src/Module.zig b/src/Module.zig index e9658ad89f41..01e240337720 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6553,7 +6553,7 @@ pub fn populateTestFunctions( } const decl = mod.declPtr(decl_index); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).childType(mod); + const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf, mod).childType(mod); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions diff --git a/src/Sema.zig b/src/Sema.zig index 088d830280aa..ced5eb247c4f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -24201,7 +24201,7 @@ fn fieldPtr( if (mem.eql(u8, field_name, "ptr")) { const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const slice_ptr_ty = inner_ty.slicePtrFieldType(buf); + const slice_ptr_ty = inner_ty.slicePtrFieldType(buf, mod); const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = slice_ptr_ty, @@ -27804,7 +27804,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), + parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), &val_ptr.castTag(.slice).?.data.ptr, ptr_elem_ty, parent.decl_ref_mut, @@ -27859,7 +27859,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), + parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), &val_ptr.castTag(.slice).?.data.ptr, ptr_elem_ty, parent.decl_ref_mut, @@ -28256,7 +28256,7 @@ fn beginComptimePtrLoad( const slice_val = tv.val.castTag(.slice).?.data; deref.pointee = switch (field_index) { Value.Payload.Slice.ptr_index => TypedValue{ - .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), + .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), .val = slice_val.ptr, }, Value.Payload.Slice.len_index => TypedValue{ @@ -29339,8 +29339,9 @@ fn analyzeSlicePtr( slice: Air.Inst.Ref, slice_ty: Type, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const result_ty = slice_ty.slicePtrFieldType(buf); + const result_ty = slice_ty.slicePtrFieldType(buf, mod); if (try sema.resolveMaybeUndefVal(slice)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); return sema.addConstant(result_ty, val.slicePtr()); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 4a10691e02f9..95a8350c7d04 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3435,7 +3435,7 @@ 
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&buf); + const ptr_ty = slice_ty.slicePtrFieldType(&buf, mod); const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 3591ead53d4b..cc2bc3a61346 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2433,7 +2433,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&buf); + const ptr_ty = slice_ty.slicePtrFieldType(&buf, mod); const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 83e4b4f93d28..4231222d4b7e 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -2462,7 +2462,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); const index_lock: ?RegisterLock = if (index_mcv == .register) self.register_manager.lockRegAssumeUnused(index_mcv.register) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index f6304a0ff336..ee604afd0fa9 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4056,7 +4056,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); const index_ty = self.typeOf(rhs); const index_mcv = try self.resolveInst(rhs); @@ -4081,11 +4081,12 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs); const dst_mcv = try self.allocRegOrMem(inst, false); try self.load(dst_mcv, slice_ptr_field_type, elem_ptr); @@ -8682,7 +8683,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf, mod) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ 
-8774,7 +8775,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf, mod) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -10813,7 +10814,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { switch (dst_ptr_ty.ptrSize(mod)) { .Slice => { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf); + const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf, mod); // TODO: this only handles slices stored in the stack const ptr = dst_ptr; diff --git a/src/codegen.zig b/src/codegen.zig index 25e8d892d846..5f5a3f66be21 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -361,7 +361,7 @@ pub fn generateSymbol( // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf, mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = slice_ptr_field_type, .val = slice.ptr, @@ -851,7 +851,7 @@ fn lowerParentPtr( var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(mod), + 1 => field_ptr.container_ty.slicePtrFieldType(&buf, mod).abiSize(mod), else => unreachable, }; }, @@ -951,7 +951,7 @@ fn lowerDeclRef( if (typed_value.ty.isSlice(mod)) { // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf, mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = slice_ptr_field_type, .val = typed_value.val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index b0fb9fa480f2..039c75de6720 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -566,7 +566,7 @@ pub const DeclGen = struct { } var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr(), .Initializer); + try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), val.slicePtr(), .Initializer); var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -787,7 +787,7 @@ pub const DeclGen = struct { try writer.writeAll("{("); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(&buf, mod); try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); } else { @@ -1088,7 +1088,7 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type); + try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), slice.ptr, initializer_type); try writer.writeAll(", "); try dg.renderValue(writer, Type.usize, slice.len, initializer_type); try writer.writeByte('}'); @@ -4107,6 +4107,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons } fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = 
f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4116,7 +4117,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = inst_ty.slicePtrFieldType(&buf); + const ptr_ty = inst_ty.slicePtrFieldType(&buf, mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -5112,7 +5113,7 @@ fn airIsNull( TypedValue{ .ty = payload_ty, .val = Value.zero } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); - const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf); + const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf, mod); break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; } else rhs: { try writer.writeAll(".is_null"); @@ -5845,7 +5846,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { // &(*(void *)p)[0], although LLVM does via GetElementPtr if (operand == .undef) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer); + try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf, mod) }, .Initializer); } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 9e6de6cb21ff..84ddce680912 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1432,7 +1432,7 @@ pub const CType = extern union { .payload => unreachable, }) |fwd_idx| { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(&buf, mod); if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { self.storage = .{ .anon = undefined }; self.storage.anon.fields[0] = .{ diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 7fa9b7433487..5289becf1ead 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1638,7 +1638,7 @@ pub const Object = struct { if (ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(&buf, mod); const len_ty = Type.usize; const name = try ty.nameAlloc(gpa, o.module); @@ -2822,7 +2822,7 @@ pub const DeclGen = struct { .Pointer => { if (t.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_type = t.slicePtrFieldType(&buf); + const ptr_type = t.slicePtrFieldType(&buf, mod); const fields: [2]*llvm.Type = .{ try dg.lowerType(ptr_type), @@ -3182,9 +3182,9 @@ pub const DeclGen = struct { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(mod).slicePtrFieldType(&buf) + param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod) else - param_ty.slicePtrFieldType(&buf); + param_ty.slicePtrFieldType(&buf, mod); const ptr_llvm_ty = try dg.lowerType(ptr_ty); const len_llvm_ty = try dg.lowerType(Type.usize); @@ -3387,7 +3387,7 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const fields: [2]*llvm.Value = .{ try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(&buf), + .ty = tv.ty.slicePtrFieldType(&buf, mod), .val = slice.ptr, }), try dg.lowerValue(.{ @@ -4169,7 +4169,7 @@ pub const DeclGen = struct { 
const mod = self.module; if (tv.ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = tv.ty.slicePtrFieldType(&buf); + const ptr_ty = tv.ty.slicePtrFieldType(&buf, mod); var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = tv.val.sliceLen(mod), @@ -6654,7 +6654,7 @@ pub const FuncGen = struct { if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf)); + const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf, mod)); return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 27a79c1c4518..e3b5d24ed9ff 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -669,7 +669,7 @@ pub const DeclGen = struct { const slice = val.castTag(.slice).?.data; var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(&buf, mod); try self.lower(ptr_ty, slice.ptr); try self.addInt(Type.usize, slice.len); @@ -2489,7 +2489,7 @@ pub const DeclGen = struct { const index_id = try self.resolve(bin_op.rhs); var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&slice_buf); + const ptr_ty = slice_ty.slicePtrFieldType(&slice_buf, mod); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); const slice_ptr = try self.extractField(ptr_ty, slice_id, 0); @@ -2987,7 +2987,7 @@ pub const DeclGen = struct { var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (payload_ty.isSlice(mod)) - payload_ty.slicePtrFieldType(&ptr_buf) + payload_ty.slicePtrFieldType(&ptr_buf, mod) else payload_ty; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 178f9fa64c48..3e4e90951ee6 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -278,7 +278,7 @@ pub const DeclState = struct { var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); var buf = try arena.create(Type.SlicePtrFieldTypeBuffer); - const ptr_ty = ty.slicePtrFieldType(buf); + const ptr_ty = ty.slicePtrFieldType(buf, mod); try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(6); diff --git a/src/type.zig b/src/type.zig index a51ae273c1a3..dac12aa74e01 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2042,7 +2042,18 @@ pub const Type = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - else => @panic("TODO"), + .ptr_type => |ptr_type| { + if (ptr_type.alignment != 0) { + return @intCast(u32, ptr_type.alignment); + } else if (opt_sema) |sema| { + const res = try ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } else { + return (ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + } + }, + .opt_type => |child| return child.toType().ptrAlignmentAdvanced(mod, opt_sema), + else => unreachable, }, } } @@ -3060,33 +3071,36 @@ pub const Type = struct { pointer: Payload.Pointer, }; - pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type { - switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - assert(payload.size 
== .Slice); - - buffer.* = .{ - .pointer = .{ - .data = .{ - .pointee_type = payload.pointee_type, - .sentinel = payload.sentinel, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = .Many, + pub fn slicePtrFieldType(ty: Type, buffer: *SlicePtrFieldTypeBuffer, mod: *const Module) Type { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => { + const payload = ty.castTag(.pointer).?.data; + assert(payload.size == .Slice); + + buffer.* = .{ + .pointer = .{ + .data = .{ + .pointee_type = payload.pointee_type, + .sentinel = payload.sentinel, + .@"align" = payload.@"align", + .@"addrspace" = payload.@"addrspace", + .bit_offset = payload.bit_offset, + .host_size = payload.host_size, + .vector_index = payload.vector_index, + .@"allowzero" = payload.@"allowzero", + .mutable = payload.mutable, + .@"volatile" = payload.@"volatile", + .size = .Many, + }, }, - }, - }; - return Type.initPayload(&buffer.pointer.base); - }, + }; + return Type.initPayload(&buffer.pointer.base); + }, - else => unreachable, + else => unreachable, + }, + else => return mod.intern_pool.slicePtrType(ty.ip_index).toType(), } } diff --git a/src/value.zig b/src/value.zig index a34a022deabe..f8188c64ab1d 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2078,7 +2078,7 @@ pub const Value = struct { } var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf); + const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema); }, @@ -2237,7 +2237,7 @@ pub const Value = struct { } var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf); + const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); const a_ptr = switch (a_ty.ptrSize(mod)) { .Slice => a.slicePtr(), .One => a, @@ -2376,7 +2376,7 @@ pub const Value = struct { .slice => { const slice = val.castTag(.slice).?.data; var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf); + const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); hash(slice.ptr, ptr_ty, hasher, mod); hash(slice.len, Type.usize, hasher, mod); }, @@ -2499,7 +2499,7 @@ pub const Value = struct { .slice => { const slice = val.castTag(.slice).?.data; var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf); + const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); slice.ptr.hashUncoerced(ptr_ty, hasher, mod); }, else => val.hashPtr(hasher, mod), From 9626811725ba7bb979559b718492b1fbe7fc0578 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 20:34:19 -0700 Subject: [PATCH 034/205] Sema: add typeHasOnePossibleValue logic for InternPool --- src/Sema.zig | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index ced5eb247c4f..43aa7e056ecd 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -32950,14 +32950,27 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } }, .ptr_type => return null, - .array_type => @panic("TODO"), + .array_type => |array_type| { + if (array_type.len == 0) + return Value.initTag(.empty_array); + if ((try sema.typeHasOnePossibleValue(array_type.child.toType())) != null) { + return 
Value.initTag(.the_only_possible_value); + } + return null; + }, .vector_type => |vector_type| { if (vector_type.len == 0) return Value.initTag(.empty_array); if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v; return null; }, - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), + .opt_type => |child| { + if (child.toType().isNoReturn()) { + return Value.null; + } else { + return null; + } + }, + .error_union_type => return null, .simple_type => |t| switch (t) { .f16, .f32, From 6350aabf9d00cbdfe73c6aa0a7d8e8f52fe6d8f1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 20:48:44 -0700 Subject: [PATCH 035/205] InternPool: fix bug in addLimbsExtraAssumeCapacity --- src/InternPool.zig | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 15b6e318edbc..fec5e721d0bb 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1319,7 +1319,7 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { @sizeOf(u64) => {}, else => @compileError("unsupported host"), } - const result = @intCast(u32, ip.extra.items.len); + const result = @intCast(u32, ip.limbs.items.len); inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) |field, i| { const new: u32 = switch (field.type) { u32 => @field(extra, field.name), @@ -1452,11 +1452,19 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { std.debug.print( \\InternPool size: {d} bytes - \\ items: {d} bytes - \\ extra: {d} bytes - \\ limbs: {d} bytes + \\ {d} items: {d} bytes + \\ {d} extra: {d} bytes + \\ {d} limbs: {d} bytes \\ - , .{ total_size, items_size, extra_size, limbs_size }); + , .{ + total_size, + ip.items.len, + items_size, + ip.extra.items.len, + extra_size, + ip.limbs.items.len, + limbs_size, + }); const tags = ip.items.items(.tag); const datas = ip.items.items(.data); @@ -1512,10 +1520,10 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { } }; counts.sort(SortContext{ .map = &counts }); - const len = @min(50, tags.len); - std.debug.print("top 50 tags:\n", .{}); + const len = @min(50, counts.count()); + std.debug.print(" top 50 tags:\n", .{}); for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| { - std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ + std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ @tagName(tag), stats.count, stats.bytes, }); } From 27d641eb35820a012e64e99df18c3577100f9dba Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 20:49:09 -0700 Subject: [PATCH 036/205] stage2: fix interned integer value printing --- src/TypedValue.zig | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/TypedValue.zig b/src/TypedValue.zig index d4ee7661d559..ee9a8abf0f81 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -416,8 +416,12 @@ pub fn print( const key = mod.intern_pool.indexToKey(val.ip_index); if (key.typeOf() == .type_type) { return Type.print(val.toType(), writer, mod); - } else { - return writer.print("{}", .{val.ip_index}); + } + switch (key) { + .int => |int| switch (int.storage) { + inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), + }, + else => return writer.print("{}", .{val.ip_index}), } }, }; From f7bd42785bf763e66557f886188ec53824cc45e0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 20:49:42 -0700 Subject: [PATCH 037/205] LLVM backend: update integer constant lowering for InternPool --- src/codegen/llvm.zig | 67 
+++++++++++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 26 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5289becf1ead..5ad3c2b8c537 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3257,32 +3257,23 @@ pub const DeclGen = struct { }, // TODO this duplicates code with Pointer but they should share the handling // of the tv.val.tag() and then Int should do extra constPtrToInt on top - .Int => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - else => { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, mod); - const int_info = tv.ty.intInfo(mod); - assert(int_info.bits != 0); - const llvm_type = dg.context.intType(int_info.bits); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; + .Int => switch (tv.val.ip_index) { + .none => switch (tv.val.tag()) { + .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), + .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), + else => { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = tv.val.toBigInt(&bigint_space, mod); + return lowerBigInt(dg, tv.ty, bigint); + }, + }, + else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .int => |int| { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int.storage.toBigInt(&bigint_space); + return lowerBigInt(dg, tv.ty, bigint); + }, + else => unreachable, }, }, .Enum => { @@ -3983,6 +3974,30 @@ pub const DeclGen = struct { } } + fn lowerBigInt(dg: *DeclGen, ty: Type, bigint: std.math.big.int.Const) *llvm.Value { + const mod = dg.module; + const int_info = ty.intInfo(mod); + assert(int_info.bits != 0); + const llvm_type = dg.context.intType(int_info.bits); + + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); + }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); + } + return unsigned_val; + } + const ParentPtr = struct { ty: Type, llvm_ptr: *llvm.Value, From 73720b6975e2650ece48cc5f38495c091360c6c9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 20:50:01 -0700 Subject: [PATCH 038/205] Sema: update onePossibleValue for InternPool --- src/type.zig | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/src/type.zig b/src/type.zig index dac12aa74e01..592eb9a21e76 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3909,15 +3909,27 @@ pub const Type = struct { return null; } }, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), + .ptr_type => return null, + .array_type => |array_type| { + if (array_type.len == 0) + return Value.initTag(.empty_array); + if 
(array_type.child.toType().onePossibleValue(mod) != null) + return Value.initTag(.the_only_possible_value); + return null; + }, .vector_type => |vector_type| { if (vector_type.len == 0) return Value.initTag(.empty_array); if (vector_type.child.toType().onePossibleValue(mod)) |v| return v; return null; }, - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), + .opt_type => |child| { + if (child.toType().isNoReturn()) { + return Value.null; + } else { + return null; + } + }, + .error_union_type => return null, .simple_type => |t| switch (t) { .f16, .f32, From 75900ec1b5a250935a6abe050a006738fba99e66 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 6 May 2023 19:20:52 -0700 Subject: [PATCH 039/205] stage2: move integer values to InternPool --- src/Air.zig | 1 + src/InternPool.zig | 18 +- src/Module.zig | 73 +- src/RangeSet.zig | 6 +- src/Sema.zig | 738 ++++++++---------- src/TypedValue.zig | 21 +- src/Zir.zig | 1 + src/arch/wasm/CodeGen.zig | 36 +- src/arch/x86_64/CodeGen.zig | 22 +- src/codegen.zig | 65 +- src/codegen/c.zig | 240 +++--- src/codegen/llvm.zig | 105 +-- src/codegen/spirv.zig | 46 +- src/link/Dwarf.zig | 3 +- src/type.zig | 89 +-- src/value.zig | 1431 ++++++++++++++--------------------- 16 files changed, 1168 insertions(+), 1727 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 43fc55e81173..549583e69790 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -913,6 +913,7 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), + negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), void_value = @enumToInt(InternPool.Index.void_value), diff --git a/src/InternPool.zig b/src/InternPool.zig index fec5e721d0bb..d2f3bf81fe97 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -390,6 +390,8 @@ pub const Index = enum(u32) { one, /// `1` (usize) one_usize, + /// `-1` (comptime_int) + negative_one, /// `std.builtin.CallingConvention.C` calling_convention_c, /// `std.builtin.CallingConvention.Inline` @@ -624,6 +626,11 @@ pub const static_keys = [_]Key{ .storage = .{ .u64 = 1 }, } }, + .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .i64 = -1 }, + } }, + .{ .enum_tag = .{ .ty = .calling_convention_type, .tag = .{ @@ -999,23 +1006,23 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), .simple_internal => @panic("TODO"), - .int_u32 => return .{ .int = .{ + .int_u32 => .{ .int = .{ .ty = .u32_type, .storage = .{ .u64 = data }, } }, - .int_i32 => return .{ .int = .{ + .int_i32 => .{ .int = .{ .ty = .i32_type, .storage = .{ .i64 = @bitCast(i32, data) }, } }, - .int_usize => return .{ .int = .{ + .int_usize => .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = data }, } }, - .int_comptime_int_u32 => return .{ .int = .{ + .int_comptime_int_u32 => .{ .int = .{ .ty = .comptime_int_type, .storage = .{ .u64 = data }, } }, - .int_comptime_int_i32 => return .{ .int = .{ + .int_comptime_int_i32 => .{ .int = .{ .ty = .comptime_int_type, .storage = .{ .i64 = @bitCast(i32, data) }, } }, @@ -1137,6 +1144,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .int => |int| b: { switch (int.ty) { + .none => unreachable, .u32_type => switch (int.storage) { .big_int => |big_int| { if 
(big_int.to(u32)) |casted| { diff --git a/src/Module.zig b/src/Module.zig index 01e240337720..9315c9efa718 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6597,7 +6597,7 @@ pub fn populateTestFunctions( field_vals.* = .{ try Value.Tag.slice.create(arena, .{ .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index), - .len = try Value.Tag.int_u64.create(arena, test_name_slice.len), + .len = try mod.intValue(Type.usize, test_name_slice.len), }), // name try Value.Tag.decl_ref.create(arena, test_decl_index), // func Value.null, // async_frame_size @@ -6628,7 +6628,7 @@ pub fn populateTestFunctions( new_var.* = decl.val.castTag(.variable).?.data.*; new_var.init = try Value.Tag.slice.create(arena, .{ .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index), - .len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()), + .len = try mod.intValue(Type.usize, mod.test_functions.count()), }); const new_val = try Value.Tag.variable.create(arena, new_var); @@ -6875,6 +6875,38 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } +pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); + if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); + var limbs_buffer: [4]usize = undefined; + var big_int = BigIntMutable.init(&limbs_buffer, x); + return intValue_big(mod, ty, big_int.toConst()); +} + +pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.ip_index, + .storage = .{ .big_int = x }, + } }); + return i.toValue(); +} + +pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.ip_index, + .storage = .{ .u64 = x }, + } }); + return i.toValue(); +} + +pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.ip_index, + .storage = .{ .i64 = x }, + } }); + return i.toValue(); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } @@ -6907,32 +6939,27 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. 
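// Worked examples (annotation, not in the patch) for the signed branch
// of intBitsForValue below:
//   x = -1   => -x - 1 = 0   => smallestUnsignedBits(0)   = 0 => 1 bit  (i1)
//   x = -128 => -x - 1 = 127 => smallestUnsignedBits(127) = 7 => 8 bits (i8)
// The explicit minInt(i64) check is needed because negating that value
// would itself overflow an i64.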
pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { assert(!val.isUndef()); - switch (val.tag()) { - .int_big_positive => { - const limbs = val.castTag(.int_big_positive).?.data; - const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true }; - return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); - }, - .int_big_negative => { - const limbs = val.castTag(.int_big_negative).?.data; - // Zero is still a possibility, in which case unsigned is fine - for (limbs) |limb| { - if (limb != 0) break; - } else return 0; // val == 0 - assert(sign); - const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false }; - return @intCast(u16, big.bitCountTwosComp()); - }, - .int_i64 => { - const x = val.castTag(.int_i64).?.data; - if (x >= 0) return Type.smallestUnsignedBits(@intCast(u64, x)); + + const key = mod.intern_pool.indexToKey(val.ip_index); + switch (key.int.storage) { + .i64 => |x| { + if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted); assert(sign); + // Protect against overflow in the following negation. + if (x == std.math.minInt(i64)) return 64; return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; }, - else => { - const x = val.toUnsignedInt(mod); + .u64 => |x| { return Type.smallestUnsignedBits(x) + @boolToInt(sign); }, + .big_int => |big| { + if (big.positive) return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); + + // Zero is still a possibility, in which case unsigned is fine + if (big.eqZero()) return 0; + + return @intCast(u16, big.bitCountTwosComp()); + }, } } diff --git a/src/RangeSet.zig b/src/RangeSet.zig index 2e28a562c6fc..a015c7b56845 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -35,8 +35,8 @@ pub fn add( src: SwitchProngSrc, ) !?SwitchProngSrc { for (self.ranges.items) |range| { - if (last.compareAll(.gte, range.first, ty, self.module) and - first.compareAll(.lte, range.last, ty, self.module)) + if (last.compareScalar(.gte, range.first, ty, self.module) and + first.compareScalar(.lte, range.last, ty, self.module)) { return range.src; // They overlap. } @@ -53,7 +53,7 @@ const LessThanContext = struct { ty: Type, module: *Module }; /// Assumes a and b do not overlap fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool { - return a.first.compareAll(.lt, b.first, ctx.ty, ctx.module); + return a.first.compareScalar(.lt, b.first, ctx.ty, ctx.module); } pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { diff --git a/src/Sema.zig b/src/Sema.zig index 43aa7e056ecd..3aa845c10b8f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2995,7 +2995,6 @@ fn zirEnumDecl( var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; var last_tag_val: ?Value = null; - var tag_val_buf: Value.Payload.U64 = undefined; while (field_i < fields_len) : (field_i += 1) { if (field_i % 32 == 0) { cur_bit_bag = sema.code.extra[bit_bag_index]; @@ -3084,11 +3083,7 @@ fn zirEnumDecl( return sema.failWithOwnedErrorMsg(msg); } } else { - tag_val_buf = .{ - .base = .{ .tag = .int_u64 }, - .data = field_i, - }; - last_tag_val = Value.initPayload(&tag_val_buf.base); + last_tag_val = try mod.intValue(enum_obj.tag_ty, field_i); } if (!(try sema.intFitsInType(last_tag_val.?, enum_obj.tag_ty, null))) { @@ -5180,16 +5175,23 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
const tracy = trace(@src()); defer tracy.end(); - const arena = sema.arena; + const mod = sema.mod; const int = sema.code.instructions.items(.data)[inst].str; const byte_count = int.len * @sizeOf(std.math.big.Limb); const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count]; - const limbs = try arena.alloc(std.math.big.Limb, int.len); + + // TODO: this allocation and copy is only needed because the limbs may be unaligned. + // If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these + // two lines can be removed. + const limbs = try sema.arena.alloc(std.math.big.Limb, int.len); @memcpy(mem.sliceAsBytes(limbs), limb_bytes); return sema.addConstant( Type.comptime_int, - try Value.Tag.int_big_positive.create(arena, limbs), + try mod.intValue_big(Type.comptime_int, .{ + .limbs = limbs, + .positive = true, + }), ); } @@ -8095,6 +8097,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -8107,12 +8110,13 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } switch (val.tag()) { .@"error" => { - const payload = try sema.arena.create(Value.Payload.U64); - payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - }; - return sema.addConstant(Type.err_int, Value.initPayload(&payload.base)); + return sema.addConstant( + Type.err_int, + try mod.intValue( + Type.err_int, + (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, + ), + ); }, // This is not a valid combination with the type `anyerror`. @@ -8280,8 +8284,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| { - var buffer: Value.Payload.U64 = undefined; - const val = enum_tag_val.enumToInt(enum_tag_ty, &buffer); + const val = try enum_tag_val.enumToInt(enum_tag_ty, mod); return sema.addConstant(int_tag_ty, try val.copy(sema.arena)); } @@ -9685,7 +9688,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, mod); + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod); const dest_max_val = if (is_vector) try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) else @@ -9946,7 +9949,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, target)); + return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, mod)); } if (dest_is_comptime_float) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{}); @@ -10470,7 +10473,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // Duplicate checking variables later also used for `inline else`. 
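// Context (annotation, not in the patch) for the declarations below:
// `range_set` records each integer prong as a [first, last] range so
// that overlaps are reported and spans(min_int, max_int) can detect a
// fully covered integer type, while `seen_values`, `seen_errors`, and
// `seen_enum_fields` deduplicate scalar, error, and enum prongs.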
var seen_enum_fields: []?Module.SwitchProngSrc = &.{}; var seen_errors = SwitchErrorSet.init(gpa); - var range_set = RangeSet.init(gpa, sema.mod); + var range_set = RangeSet.init(gpa, mod); var true_count: u8 = 0; var false_count: u8 = 0; @@ -10596,11 +10599,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .{field_name}, ); } - try sema.mod.errNoteNonLazy( - operand_ty.declSrcLoc(sema.mod), + try mod.errNoteNonLazy( + operand_ty.declSrcLoc(mod), msg, "enum '{}' declared here", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ); break :msg msg; }; @@ -10827,7 +10830,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer arena.deinit(); const min_int = try operand_ty.minInt(arena.allocator(), mod); - const max_int = try operand_ty.maxInt(arena.allocator(), mod); + const max_int = try operand_ty.maxIntScalar(mod); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -10926,13 +10929,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, src, "else prong required when switching on type '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ); } var seen_values = ValueSrcMap.initContext(gpa, .{ .ty = operand_ty, - .mod = sema.mod, + .mod = mod, }); defer seen_values.deinit(); @@ -10996,7 +10999,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } @@ -11054,7 +11057,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + if (operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11080,7 +11083,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. 
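The `range_set.spans(min_int, max_int, operand_ty)` check above decides whether the integer prongs make the switch exhaustive. Conceptually: after sorting, the non-overlapping ranges must chain from the type's minimum to its maximum with no gaps. A minimal sketch over plain u64 ranges (simplified; this is not the RangeSet API):

    const Range = struct { first: u64, last: u64 };

    // Assumes `ranges` is sorted by `first` and free of overlaps, as RangeSet
    // guarantees. Overflow of `last + 1` is ignored for brevity.
    fn spans(ranges: []const Range, min: u64, max: u64) bool {
        if (ranges.len == 0) return false;
        if (ranges[0].first != min) return false;
        if (ranges[ranges.len - 1].last != max) return false;
        var prev_last = ranges[0].last;
        for (ranges[1..]) |r| {
            if (r.first != prev_last + 1) return false; // gap between prongs
            prev_last = r.last;
        }
        return true;
    }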
const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + if (operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11128,7 +11131,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) { return Air.Inst.Ref.unreachable_value; } - if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and + if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { try sema.zirDbgStmt(block, cond_dbg_node_index); @@ -11182,7 +11185,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; - const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); + const field_ty = maybe_union_ty.unionFieldType(item_val, mod); break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; @@ -11245,9 +11248,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item_last_ref = try sema.resolveInst(last_ref); const item_last = sema.resolveConstValue(block, .unneeded, item_last_ref, undefined) catch unreachable; - while (item.compareAll(.lte, item_last, operand_ty, sema.mod)) : ({ + while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({ // Previous validation has resolved any possible lazy values. 
- item = try sema.intAddScalar(item, Value.one); + item = try sema.intAddScalar(item, Value.one, operand_ty); }) { cases_len += 1; @@ -11260,7 +11263,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } }; - const decl = sema.mod.declPtr(case_block.src_decl); + const decl = mod.declPtr(case_block.src_decl); try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none)); unreachable; }, @@ -11289,14 +11292,14 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable; - const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); + const field_ty = maybe_union_ty.unionFieldType(item_val, mod); break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }; - const decl = sema.mod.declPtr(case_block.src_decl); + const decl = mod.declPtr(case_block.src_decl); try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none)); unreachable; }, @@ -11333,7 +11336,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError for (items) |item_ref| { const item = try sema.resolveInst(item_ref); const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; - const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); + const field_ty = maybe_union_ty.unionFieldType(item_val, mod); if (field_ty.zigTypeTag(mod) != .NoReturn) break true; } else false else @@ -11461,7 +11464,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .Enum => { if (operand_ty.isNonexhaustiveEnum() and !union_originally) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); } for (seen_enum_fields, 0..) 
|f, i| { @@ -11476,7 +11479,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError case_block.wip_capture_scope = child_block.wip_capture_scope; const analyze_body = if (union_originally) blk: { - const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); + const field_ty = maybe_union_ty.unionFieldType(item_val, mod); break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; @@ -11499,7 +11502,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .ErrorSet => { if (operand_ty.isAnyError()) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); } for (operand_ty.errorSetNames()) |error_name| { @@ -11587,7 +11590,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } }, else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), }; @@ -11598,7 +11601,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError case_block.wip_capture_scope = wip_captures.scope; case_block.inline_case_capture = .none; - if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and + if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { try sema.zirDbgStmt(&case_block, cond_dbg_node_index); @@ -11679,7 +11682,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; const min = try ty.minInt(sema.arena, mod); - const max = try ty.maxInt(sema.arena, mod); + const max = try ty.maxIntScalar(mod); return RangeSetUnhandledIterator{ .sema = sema, @@ -11693,19 +11696,19 @@ const RangeSetUnhandledIterator = struct { fn next(it: *RangeSetUnhandledIterator) !?Value { while (it.range_i < it.ranges.len) : (it.range_i += 1) { if (!it.first) { - it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); } it.first = false; - if (it.cur.compareAll(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { + if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { return it.cur; } it.cur = it.ranges[it.range_i].last; } if (!it.first) { - it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); } it.first = false; - if (it.cur.compareAll(.lte, it.max, it.ty, it.sema.mod)) { + if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) { return it.cur; } return null; @@ -11750,7 +11753,7 @@ fn validateSwitchRange( ) CompileError!void { const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; - if (first_val.compareAll(.gt, last_val, operand_ty, sema.mod)) { + if (first_val.compareScalar(.gt, last_val, operand_ty, sema.mod)) { const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), src_node_offset, .first); return sema.fail(block, src, "range start value is greater than the end value", .{}); } @@ -12208,16 +12211,11 @@ fn zirShl( return lhs; } if 
(scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) { - var bits_payload = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = scalar_ty.intInfo(mod).bits, - }; - const bit_value = Value.initPayload(&bits_payload.base); + const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - var elem_value_buf: Value.ElemValueBuffer = undefined; - const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + const rhs_elem = try rhs_val.elemValue(sema.mod, i); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), @@ -12236,8 +12234,7 @@ fn zirShl( if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - var elem_value_buf: Value.ElemValueBuffer = undefined; - const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + const rhs_elem = try rhs_val.elemValue(sema.mod, i); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), @@ -12309,7 +12306,7 @@ fn zirShl( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count); + const bit_count_val = try mod.intValue(scalar_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); @@ -12396,16 +12393,11 @@ fn zirShr( return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) { - var bits_payload = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = scalar_ty.intInfo(mod).bits, - }; - const bit_value = Value.initPayload(&bits_payload.base); + const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - var elem_value_buf: Value.ElemValueBuffer = undefined; - const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + const rhs_elem = try rhs_val.elemValue(sema.mod, i); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), @@ -12424,8 +12416,7 @@ fn zirShr( if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - var elem_value_buf: Value.ElemValueBuffer = undefined; - const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + const rhs_elem = try rhs_val.elemValue(sema.mod, i); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), @@ -12465,7 +12456,7 @@ fn zirShr( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count); + const bit_count_val = try mod.intValue(scalar_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try 
Value.Tag.repeated.create(sema.arena, bit_count_val)); @@ -12587,10 +12578,9 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); - var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf); + const elem_val = try val.elemValue(sema.mod, i); elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod); } return sema.addConstant( @@ -12695,6 +12685,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); @@ -12714,11 +12705,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: { if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined); - return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)}); + return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)}); }; const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse { assert(!rhs_is_tuple); - return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(sema.mod)}); + return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)}); }; const resolved_elem_ty = t: { @@ -12780,8 +12771,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ), }; - const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod); - const mod = sema.mod; + const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, mod); const ptr_addrspace = p: { if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod); if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod); @@ -12815,7 +12805,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const lhs_elem_i = elem_i; const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type; const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); @@ -12825,7 +12815,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs_elem_i = elem_i - lhs_len; const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type; const elem_default_val = if (rhs_is_tuple) 
rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); @@ -12842,12 +12832,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.requireRuntimeBlock(block, src, runtime_src); if (ptr_addrspace) |ptr_as| { - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = result_ty, .@"addrspace" = ptr_as, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = resolved_elem_ty, .@"addrspace" = ptr_as, }); @@ -13009,6 +12999,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); @@ -13025,10 +13016,9 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } // Analyze the lhs first, to catch the case that someone tried to do exponentiation - const mod = sema.mod; const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse { const msg = msg: { - const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (lhs_ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => { @@ -13048,7 +13038,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.fail(block, rhs_src, "operation results in overflow", .{}); const result_len = try sema.usizeCast(block, src, result_len_u64); - const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod); + const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, mod); const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); @@ -13065,7 +13055,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. 
if (lhs_len == 1) { - const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, 0); + const elem_val = try lhs_sub_val.elemValue(mod, 0); break :v try Value.Tag.repeated.create(sema.arena, elem_val); } @@ -13074,7 +13064,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai while (elem_i < result_len) { var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { - const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_i); + const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); element_vals[elem_i] = elem_val; elem_i += 1; } @@ -13090,12 +13080,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.requireRuntimeBlock(block, src, lhs_src); if (ptr_addrspace) |ptr_as| { - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = result_ty, .@"addrspace" = ptr_as, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = lhs_info.elem_type, .@"addrspace" = ptr_as, }); @@ -13797,7 +13787,7 @@ fn addDivIntOverflowSafety( } const min_int = try resolved_type.minInt(sema.arena, mod); - const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1); + const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector) try Value.Tag.repeated.create(sema.arena, neg_one_scalar) else @@ -13806,12 +13796,12 @@ fn addDivIntOverflowSafety( // If the LHS is comptime-known to be not equal to the min int, // no overflow is possible. if (maybe_lhs_val) |lhs_val| { - if (lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return; + if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return; } // If the RHS is comptime-known to not be equal to -1, no overflow is possible. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return; + if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return; } var ok: Air.Inst.Ref = .none; @@ -14038,23 +14028,18 @@ fn intRem( const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty); } return Value.Tag.aggregate.create(sema.arena, result_data); } - return sema.intRemScalar(lhs, rhs); + return sema.intRemScalar(lhs, rhs, ty); } -fn intRemScalar( - sema: *Sema, - lhs: Value, - rhs: Value, -) CompileError!Value { +fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value { const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
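The `lhs_len == 1` fast path above stores one `repeated` value instead of materializing `result_len` copies; the general path cycles through the LHS elements. That fill loop, sketched over plain integers (assumes `result.len` is a multiple of `lhs.len`, which array multiplication guarantees):

    const std = @import("std");

    fn fillByRepeating(result: []u64, lhs: []const u64) void {
        std.debug.assert(lhs.len != 0 and result.len % lhs.len == 0);
        var elem_i: usize = 0;
        while (elem_i < result.len) {
            // Copy one full repetition of the LHS per outer iteration.
            for (lhs) |v| {
                result[elem_i] = v;
                elem_i += 1;
            }
        }
    }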
@@ -14079,7 +14064,7 @@ fn intRemScalar( var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return Value.fromBigInt(sema.arena, result_r.toConst()); + return mod.intValue_big(scalar_ty, result_r.toConst()); } fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -15063,7 +15048,7 @@ fn analyzePtrArithmetic( .ptr_sub => addr - elem_size * offset_int, else => unreachable, }; - const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr); + const new_ptr_val = try mod.intValue(new_ptr_ty, new_addr); return sema.addConstant(new_ptr_ty, new_ptr_val); } if (air_tag == .ptr_sub) { @@ -15826,9 +15811,9 @@ fn zirBuiltinSrc( // fn_name: [:0]const u8, field_values[1] = func_name_val; // line: u32 - field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try Value.Tag.int_u64.create(sema.arena, extra.line + 1)); + field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try mod.intValue(Type.u32, extra.line + 1)); // column: u32, - field_values[3] = try Value.Tag.int_u64.create(sema.arena, extra.column + 1); + field_values[3] = try mod.intValue(Type.u32, extra.column + 1); return sema.addConstant( try sema.getBuiltinType("SourceLocation"), @@ -15977,7 +15962,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try Value.Tag.slice.create(sema.arena, .{ .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, param_vals.len), + .len = try mod.intValue(Type.usize, param_vals.len), }); }; @@ -15994,7 +15979,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // calling_convention: CallingConvention, try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)), // alignment: comptime_int, - try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(mod)), + try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)), // is_generic: bool, Value.makeBool(info.is_generic), // is_var_args: bool, @@ -16022,7 +16007,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai @enumToInt(info.signedness), ); // bits: comptime_int, - field_values[1] = try Value.Tag.int_u64.create(sema.arena, info.bits); + field_values[1] = try mod.intValue(Type.comptime_int, info.bits); return sema.addConstant( type_info_ty, @@ -16035,7 +16020,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Float => { const field_values = try sema.arena.alloc(Value, 1); // bits: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(mod)); + field_values[0] = try mod.intValue(Type.comptime_int, ty.bitSize(mod)); return sema.addConstant( type_info_ty, @@ -16048,7 +16033,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Pointer => { const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) - try Value.Tag.int_u64.create(sema.arena, info.@"align") + try mod.intValue(Type.comptime_int, info.@"align") else try info.pointee_type.lazyAbiAlignment(mod, sema.arena); @@ -16084,7 +16069,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = ty.arrayInfo(mod); const field_values = try sema.arena.alloc(Value, 3); // len: comptime_int, - field_values[0] = try 
Value.Tag.int_u64.create(sema.arena, info.len); + field_values[0] = try mod.intValue(Type.comptime_int, info.len); // child: type, field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); // sentinel: ?*const anyopaque, @@ -16102,7 +16087,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = ty.arrayInfo(mod); const field_values = try sema.arena.alloc(Value, 2); // len: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); + field_values[0] = try mod.intValue(Type.comptime_int, info.len); // child: type, field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); @@ -16202,7 +16187,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl); const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = new_decl_val, - .len = try Value.Tag.int_u64.create(sema.arena, vals.len), + .len = try mod.intValue(Type.usize, vals.len), }); break :v try Value.Tag.opt_payload.create(sema.arena, slice_val); } else Value.null; @@ -16263,8 +16248,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const tag_val = Value.initPayload(&tag_val_payload.base); - var buffer: Value.Payload.U64 = undefined; - const int_val = try tag_val.enumToInt(ty, &buffer).copy(fields_anon_decl.arena()); + const int_val = try tag_val.enumToInt(ty, mod); const name = enum_fields.keys()[i]; const name_val = v: { @@ -16379,7 +16363,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // type: type, try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty), // alignment: comptime_int, - try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment), + try mod.intValue(Type.comptime_int, alignment), }; field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields); } @@ -16398,7 +16382,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try Value.Tag.slice.create(sema.arena, .{ .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, union_field_vals.len), + .len = try mod.intValue(Type.usize, union_field_vals.len), }); }; @@ -16476,7 +16460,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len), + .len = try mod.intValue(Type.usize, bytes.len), }); }; @@ -16518,7 +16502,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len), + .len = try mod.intValue(Type.usize, bytes.len), }); }; @@ -16540,7 +16524,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_comptime: bool, Value.makeBool(field.is_comptime), // alignment: comptime_int, - try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment), + try mod.intValue(Type.comptime_int, alignment), }; field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); } @@ -16561,7 +16545,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, 
inst: Zir.Inst.Index) CompileError!Ai ); break :v try Value.Tag.slice.create(sema.arena, .{ .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, struct_field_vals.len), + .len = try mod.intValue(Type.usize, struct_field_vals.len), }); }; @@ -16636,6 +16620,7 @@ fn typeInfoDecls( type_info_ty: Type, opt_namespace: ?*Module.Namespace, ) CompileError!Value { + const mod = sema.mod; var decls_anon_decl = try block.startAnonDecl(); defer decls_anon_decl.deinit(); @@ -16646,9 +16631,9 @@ fn typeInfoDecls( type_info_ty.getNamespace().?, "Declaration", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); try sema.ensureDeclAnalyzed(declaration_ty_decl_index); - const declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index); + const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena()); }; try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena)); @@ -16676,7 +16661,7 @@ fn typeInfoDecls( ); return try Value.Tag.slice.create(sema.arena, .{ .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, decl_vals.items.len), + .len = try mod.intValue(Type.usize, decl_vals.items.len), }); } @@ -16713,7 +16698,7 @@ fn typeInfoNamespaceDecls( ); break :v try Value.Tag.slice.create(decls_anon_decl, .{ .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl), - .len = try Value.Tag.int_u64.create(decls_anon_decl, bytes.len), + .len = try mod.intValue(Type.usize, bytes.len), }); }; @@ -18620,10 +18605,9 @@ fn zirUnaryMath( if (val.isUndef()) return sema.addConstUndef(result_ty); - var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); + const elem_val = try val.elemValue(sema.mod, i); elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod); } return sema.addConstant( @@ -18717,7 +18701,12 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return block.addUnOp(.tag_name, casted_operand); } -fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { +fn zirReify( + sema: *Sema, + block: *Block, + extended: Zir.Inst.Extended.InstData, + inst: Zir.Inst.Index, +) CompileError!Air.Inst.Ref { const mod = sema.mod; const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; @@ -18730,7 +18719,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const union_val = val.cast(Value.Payload.Union).?.data; const target = mod.getTarget(); const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?; - if (union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src); + if (try union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src); switch (@intToEnum(std.builtin.TypeId, tag_index)) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, @@ -18845,10 +18834,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in } else if (ptr_size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); @@ -18893,7 +18882,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?; } else null; - const ty = try Type.array(sema.arena, len, sentinel, child_ty, sema.mod); + const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod); return sema.addType(ty); }, .Optional => { @@ -18938,13 +18927,12 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in try names.ensureUnusedCapacity(sema.arena, len); var i: usize = 0; while (i < len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = slice_val.ptr.elemValueBuffer(mod, i, &buf); + const elem_val = try slice_val.ptr.elemValue(mod, i); const struct_val = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // error_set: type, const name_val = struct_val[0]; - const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, sema.mod); + const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); const kv = try mod.getErrorValue(name_str); const gop = names.getOrPutAssumeCapacity(kv.key); @@ -19061,7 +19049,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in var field_i: usize = 0; while (field_i < fields_len) : (field_i += 1) { - const elem_val = try fields_val.elemValue(sema.mod, sema.arena, field_i); + const elem_val = try 
fields_val.elemValue(mod, field_i); const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // name: []const u8 @@ -19072,7 +19060,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const field_name = try name_val.toAllocatedBytes( Type.const_slice_u8, new_decl_arena_allocator, - sema.mod, + mod, ); if (!try sema.intFitsInType(value_val, enum_obj.tag_ty, null)) { @@ -19183,7 +19171,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in Type.Tag.union_tagged else if (layout != .Auto) Type.Tag.@"union" - else switch (block.sema.mod.optimizeMode()) { + else switch (mod.optimizeMode()) { .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged, .ReleaseFast, .ReleaseSmall => Type.Tag.@"union", }; @@ -19236,7 +19224,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in var i: usize = 0; while (i < fields_len) : (i += 1) { - const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i); + const elem_val = try fields_val.elemValue(mod, i); const field_struct_val = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // name: []const u8 @@ -19249,7 +19237,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const field_name = try name_val.toAllocatedBytes( Type.const_slice_u8, new_decl_arena_allocator, - sema.mod, + mod, ); if (enum_field_names) |set| { @@ -19260,7 +19248,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const enum_has_field = names.orderedRemove(field_name); if (!enum_has_field) { const msg = msg: { - const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; @@ -19293,10 +19281,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in } if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .union_field); try sema.addDeclaredHereNote(msg, field_ty); @@ -19305,10 +19293,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.failWithOwnedErrorMsg(msg); } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { - const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty); try sema.addDeclaredHereNote(msg, field_ty); @@ -19386,8 +19374,7 
@@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in var noalias_bits: u32 = 0; var i: usize = 0; while (i < args_len) : (i += 1) { - var arg_buf: Value.ElemValueBuffer = undefined; - const arg = args_slice_val.ptr.elemValueBuffer(mod, i, &arg_buf); + const arg = try args_slice_val.ptr.elemValue(mod, i); const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // is_generic: bool, @@ -19486,7 +19473,7 @@ fn reifyStruct( try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); var i: usize = 0; while (i < fields_len) : (i += 1) { - const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i); + const elem_val = try fields_val.elemValue(sema.mod, i); const field_struct_val = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // name: []const u8 @@ -19892,12 +19879,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0) return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)}); - const val_payload = try sema.arena.create(Value.Payload.U64); - val_payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = addr, - }; - return sema.addConstant(ptr_ty, Value.initPayload(&val_payload.base)); + return sema.addConstant(ptr_ty, try mod.intValue(ptr_ty, addr)); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -19908,14 +19890,9 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (ptr_align > 1) { - const val_payload = try sema.arena.create(Value.Payload.U64); - val_payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = ptr_align - 1, - }; const align_minus_1 = try sema.addConstant( Type.usize, - Value.initPayload(&val_payload.base), + try mod.intValue(Type.usize, ptr_align - 1), ); const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); @@ -20254,10 +20231,9 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod), ); } - var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod)); for (elems, 0..) 
|*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); + const elem_val = try val.elemValue(sema.mod, i); elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod); } return sema.addConstant( @@ -20302,14 +20278,9 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (block.wantSafety() and dest_align > 1 and try sema.typeHasRuntimeBits(ptr_info.pointee_type)) { - const val_payload = try sema.arena.create(Value.Payload.U64); - val_payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = dest_align - 1, - }; const align_minus_1 = try sema.addConstant( Type.usize, - Value.initPayload(&val_payload.base), + try mod.intValue(Type.usize, dest_align - 1), ); const actual_ptr = if (ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty) @@ -20359,13 +20330,12 @@ fn zirBitCount( if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); - var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) |*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); + const elem_val = try val.elemValue(sema.mod, i); const count = comptimeOp(elem_val, scalar_ty, mod); - elem.* = try Value.Tag.int_u64.create(sema.arena, count); + elem.* = try mod.intValue(scalar_ty, count); } return sema.addConstant( result_ty, @@ -20429,10 +20399,9 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); - var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); + const elem_val = try val.elemValue(sema.mod, i); elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena); } return sema.addConstant( @@ -20478,10 +20447,9 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); - var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); + const elem_val = try val.elemValue(sema.mod, i); elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena); } return sema.addConstant( @@ -21241,11 +21209,10 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
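A pattern repeated through the hunks above (`@truncate`, `@popCount`, `@byteSwap`, `@bitReverse` on vectors): the stack-buffer `elemValueBuffer` calls become fallible `elemValue(mod, i)` calls, and scalar results are built with `mod.intValue` rather than arena-tagged payloads. The comptime element-wise loop shape, in isolation (the element type and the operation stand in for the real ones):

    const std = @import("std");

    fn mapVector(arena: std.mem.Allocator, elems_in: []const u64) ![]u64 {
        const elems_out = try arena.alloc(u64, elems_in.len);
        for (elems_out, 0..) |*elem, i| {
            // Stand-in for the real per-element operation.
            elem.* = @byteSwap(elems_in[i]);
        }
        return elems_out;
    }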
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty); - var accum: Value = try operand_val.elemValue(mod, sema.arena, 0); - var elem_buf: Value.ElemValueBuffer = undefined; + var accum: Value = try operand_val.elemValue(mod, 0); var i: u32 = 1; while (i < vec_len) : (i += 1) { - const elem_val = operand_val.elemValueBuffer(mod, i, &elem_buf); + const elem_val = try operand_val.elemValue(mod, i); switch (operation) { .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod), .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod), @@ -21359,8 +21326,7 @@ fn analyzeShuffle( var i: usize = 0; while (i < mask_len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const elem = mask.elemValueBuffer(sema.mod, i, &buf); + const elem = try mask.elemValue(sema.mod, i); if (elem.isUndef()) continue; const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; @@ -21398,8 +21364,7 @@ fn analyzeShuffle( i = 0; while (i < mask_len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const mask_elem_val = mask.elemValueBuffer(sema.mod, i, &buf); + const mask_elem_val = try mask.elemValue(sema.mod, i); if (mask_elem_val.isUndef()) { values[i] = Value.undef; continue; @@ -21407,9 +21372,9 @@ fn analyzeShuffle( const int = mask_elem_val.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); if (int >= 0) { - values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned); + values[i] = try a_val.elemValue(sema.mod, unsigned); } else { - values[i] = try b_val.elemValue(sema.mod, sema.arena, unsigned); + values[i] = try b_val.elemValue(sema.mod, unsigned); } } const res_val = try Value.Tag.aggregate.create(sema.arena, values); @@ -21430,7 +21395,7 @@ fn analyzeShuffle( const expand_mask_values = try sema.arena.alloc(Value, max_len); i = 0; while (i < min_len) : (i += 1) { - expand_mask_values[i] = try Value.Tag.int_u64.create(sema.arena, i); + expand_mask_values[i] = try mod.intValue(Type.comptime_int, i); } while (i < max_len) : (i += 1) { expand_mask_values[i] = Value.negative_one; @@ -21509,15 +21474,14 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C if (maybe_b) |b_val| { if (b_val.isUndef()) return sema.addConstUndef(vec_ty); - var buf: Value.ElemValueBuffer = undefined; const elems = try sema.gpa.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { - const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf); + const pred_elem_val = try pred_val.elemValue(sema.mod, i); const should_choose_a = pred_elem_val.toBool(mod); if (should_choose_a) { - elem.* = a_val.elemValueBuffer(sema.mod, i, &buf); + elem.* = try a_val.elemValue(sema.mod, i); } else { - elem.* = b_val.elemValueBuffer(sema.mod, i, &buf); + elem.* = try b_val.elemValue(sema.mod, i); } } @@ -22067,12 +22031,10 @@ fn analyzeMinMax( cur_minmax = try sema.addConstant(simd_op.result_ty, result_val); continue; }; - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { - const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem_val = try cur_val.elemValue(mod, i); + const rhs_elem_val = try operand_val.elemValue(mod, i); elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod); } cur_minmax = try sema.addConstant( @@ -22105,10 +22067,10 @@ fn analyzeMinMax( if (len == 0) break :blk orig_ty; if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats - var cur_min: Value = try val.elemValue(mod, sema.arena, 0); + var cur_min: Value = try val.elemValue(mod, 0); var cur_max: Value = cur_min; for (1..len) |idx| { - const elem_val = try val.elemValue(mod, sema.arena, idx); + const elem_val = try val.elemValue(mod, idx); if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val; if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val; @@ -23987,7 +23949,7 @@ fn fieldVal( if (mem.eql(u8, field_name, "len")) { return sema.addConstant( Type.usize, - try Value.Tag.int_u64.create(arena, inner_ty.arrayLen(mod)), + try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), ); } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); @@ -24179,7 +24141,7 @@ fn fieldPtr( defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( Type.usize, - try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen(mod)), + try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), 0, // default alignment )); } else { @@ -25352,7 +25314,7 @@ fn elemValArray( } if (maybe_index_val) |index_val| { const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_val = try array_val.elemValue(sema.mod, sema.arena, index); + const elem_val = try array_val.elemValue(mod, index); return sema.addConstant(elem_ty, elem_val); } } @@ -25914,7 +25876,7 @@ fn coerceExtra( // we use a dummy pointer value with the required alignment. 
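The `analyzeMinMax` refinement above scans comptime-known elements for their extremes, bailing out on floats or undef, and then picks the smallest integer type that can hold both bounds. The scan itself, sketched over i64 (the type-selection step is elided):

    fn bounds(elems: []const i64) struct { min: i64, max: i64 } {
        var cur_min = elems[0];
        var cur_max = elems[0];
        for (elems[1..]) |e| {
            if (e < cur_min) cur_min = e;
            if (e > cur_max) cur_max = e;
        }
        return .{ .min = cur_min, .max = cur_max };
    }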
const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = if (dest_info.@"align" != 0) - try Value.Tag.int_u64.create(sema.arena, dest_info.@"align") + try mod.intValue(Type.usize, dest_info.@"align") else try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), .len = Value.zero, @@ -26022,7 +25984,7 @@ fn coerceExtra( .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) { .ComptimeFloat => { const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const result_val = try val.floatCast(sema.arena, dest_ty, target); + const result_val = try val.floatCast(sema.arena, dest_ty, mod); return try sema.addConstant(dest_ty, result_val); }, .Float => { @@ -26030,7 +25992,7 @@ fn coerceExtra( return sema.addConstUndef(dest_ty); } if (try sema.resolveMaybeUndefVal(inst)) |val| { - const result_val = try val.floatCast(sema.arena, dest_ty, target); + const result_val = try val.floatCast(sema.arena, dest_ty, mod); if (!val.eql(result_val, inst_ty, sema.mod)) { return sema.fail( block, @@ -27431,11 +27393,13 @@ fn storePtrVal( const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), }; operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), @@ -27589,7 +27553,7 @@ fn beginComptimePtrMutation( assert(bytes.len >= dest_len); const elems = try arena.alloc(Value, @intCast(usize, dest_len)); for (elems, 0..) |*elem, i| { - elem.* = try Value.Tag.int_u64.create(arena, bytes[i]); + elem.* = try mod.intValue(elem_ty, bytes[i]); } val_ptr.* = try Value.Tag.aggregate.create(arena, elems); @@ -27618,7 +27582,7 @@ fn beginComptimePtrMutation( const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; const elems = try arena.alloc(Value, @intCast(usize, dest_len)); for (bytes, 0..) |byte, i| { - elems[i] = try Value.Tag.int_u64.create(arena, byte); + elems[i] = try mod.intValue(elem_ty, byte); } if (parent.ty.sentinel(mod)) |sent_val| { assert(elems.len == bytes.len + 1); @@ -28111,7 +28075,7 @@ fn beginComptimePtrLoad( maybe_array_ty: ?Type, ) ComptimePtrLoadError!ComptimePtrLoadKit { const mod = sema.mod; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) { .null_value => { @@ -28128,7 +28092,7 @@ fn beginComptimePtrLoad( else => unreachable, }; const is_mutable = ptr_val.tag() == .decl_ref_mut; - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; @@ -28150,7 +28114,7 @@ fn beginComptimePtrLoad( // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { - assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod))); + assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, mod))); } if (elem_ptr.index != 0) { @@ -28184,11 +28148,11 @@ fn beginComptimePtrLoad( if (maybe_array_ty) |load_ty| { // It's possible that we're loading a [N]T, in which case we'd like to slice // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) { + if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ - .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), - .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N), + .ty = try Type.array(sema.arena, N, null, elem_ty, mod), + .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N), } else null; break :blk deref; } @@ -28209,7 +28173,7 @@ fn beginComptimePtrLoad( } deref.pointee = TypedValue{ .ty = elem_ty, - .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index), + .val = try array_tv.val.elemValue(mod, elem_ptr.index), }; break :blk deref; }, @@ -28329,12 +28293,6 @@ fn beginComptimePtrLoad( break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null); }, - .zero, - .one, - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, .variable, .extern_fn, .function, @@ -28342,7 +28300,10 @@ fn beginComptimePtrLoad( else => unreachable, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .int => return error.RuntimeLoad, + else => unreachable, + }, }; if (deref.pointee) |tv| { @@ -28373,9 +28334,9 @@ fn bitCast( if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ - dest_ty.fmt(sema.mod), + dest_ty.fmt(mod), dest_bits, - old_ty.fmt(sema.mod), + old_ty.fmt(mod), old_bits, }); } @@ -28407,6 +28368,7 @@ fn bitCastVal( const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => return null, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}), @@ -28427,7 +28389,7 @@ fn coerceArrayPtrToSlice( const array_ty = ptr_array_ty.childType(mod); const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = val, - .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)), + .len = try mod.intValue(Type.usize, array_ty.arrayLen(mod)), }); return sema.addConstant(dest_ty, slice_val); } @@ -28781,7 +28743,7 @@ fn coerceArrayLike( for (element_vals, 0..) 
|*elem, i| { const index_ref = try sema.addConstant( Type.usize, - try Value.Tag.int_u64.create(sema.arena, i), + try mod.intValue(Type.usize, i), ); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location @@ -29634,7 +29596,7 @@ fn analyzeSlice( var end_is_len = uncasted_end_opt == .none; const end = e: { if (array_ty.zigTypeTag(mod) == .Array) { - const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)); + const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod)); if (!end_is_len) { const end = if (by_length) end: { @@ -29643,8 +29605,8 @@ fn analyzeSlice( break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveMaybeUndefVal(end)) |end_val| { - const len_s_val = try Value.Tag.int_u64.create( - sema.arena, + const len_s_val = try mod.intValue( + Type.usize, array_ty.arrayLenIncludingSentinel(mod), ); if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) { @@ -29689,12 +29651,10 @@ fn analyzeSlice( return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; - var int_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel), - }; - const slice_len_val = Value.initPayload(&int_payload.base); - if (!(try sema.compareAll(end_val, .lte, slice_len_val, Type.usize))) { + const slice_len = slice_val.sliceLen(mod); + const len_plus_sent = slice_len + @boolToInt(has_sentinel); + const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); + if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { const sentinel_label: []const u8 = if (has_sentinel) " +1 (sentinel)" else @@ -29712,13 +29672,10 @@ fn analyzeSlice( ); } - // If the slice has a sentinel, we subtract one so that - // end_is_len is only true if it equals the length WITHOUT - // the sentinel, so we don't add a sentinel type. - if (has_sentinel) { - int_payload.data -= 1; - } - + // If the slice has a sentinel, we consider end_is_len + // to be true only if it equals the length WITHOUT the + // sentinel, so we don't add a sentinel type.
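The two length values in this `analyzeSlice` hunk encode the bounds rule: with a sentinel, `end` may reach one element past the stated length, while `end_is_len` must compare against the sentinel-free length (the hunk resumes below with exactly that value). As plain arithmetic (names assumed for illustration):

    fn sliceEndInBounds(end: u64, len: u64, has_sentinel: bool) bool {
        // Matches the "+1 (sentinel)" allowance in the error message above.
        return end <= len + @boolToInt(has_sentinel);
    }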
+ const slice_len_val = try mod.intValue(Type.usize, slice_len); if (end_val.eql(slice_len_val, Type.usize, mod)) { end_is_len = true; } @@ -30134,7 +30091,7 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128)); + var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod)); defer bigint.deinit(); if (lhs_val.floatHasFraction()) { if (lhs_is_signed) { @@ -30193,7 +30150,7 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128)); + var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod)); defer bigint.deinit(); if (rhs_val.floatHasFraction()) { if (rhs_is_signed) { @@ -31835,6 +31792,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, + .negative_one => unreachable, .calling_convention_c => unreachable, .calling_convention_inline => unreachable, .void_value => unreachable, @@ -32462,11 +32420,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (fields_len > 0) { - var field_count_val: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = fields_len - 1, - }; - if (!(try sema.intFitsInType(Value.initPayload(&field_count_val.base), int_tag_ty, null))) { + const field_count_val = try mod.intValue(int_tag_ty, fields_len - 1); + if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); @@ -33207,7 +33162,8 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { - return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); + const mod = sema.mod; + return sema.addConstant(ty, try mod.intValue(ty, int)); } fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { @@ -33223,7 +33179,11 @@ pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { .tag = .interned, .data = .{ .interned = val.ip_index }, }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + // This assertion can be removed when the `ty` parameter is removed from + // this function thanks to the InternPool transition being complete. + assert(Type.eql(sema.typeOf(result), ty, sema.mod)); + return result; } const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, val); @@ -33833,19 +33793,18 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty); } return Value.Tag.aggregate.create(sema.arena, result_data); } - return sema.intAddScalar(lhs, rhs); + return sema.intAddScalar(lhs, rhs, ty); } -fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { +fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -33859,7 +33818,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); - return Value.fromBigInt(sema.arena, result_bigint.toConst()); + return mod.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -33884,28 +33843,22 @@ fn numberAddWrapScalar( return overflow_result.wrapped_result; } -fn intSub( - sema: *Sema, - lhs: Value, - rhs: Value, - ty: Type, -) !Value { +fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty); } return Value.Tag.aggregate.create(sema.arena, result_data); } - return sema.intSubScalar(lhs, rhs); + return sema.intSubScalar(lhs, rhs, ty); } -fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { +fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -33919,7 +33872,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.sub(lhs_bigint, rhs_bigint); - return Value.fromBigInt(sema.arena, result_bigint.toConst()); + return mod.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -33954,10 +33907,8 @@ fn floatAdd( if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -33971,31 +33922,32 @@ fn floatAddScalar( rhs: Value, float_type: Type, ) !Value { + const mod = sema.mod; const target = sema.mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(sema.arena, lhs_val + rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(sema.arena, lhs_val + rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(sema.arena, lhs_val + rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(sema.arena, lhs_val + rhs_val); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(sema.arena, lhs_val + rhs_val); }, else => unreachable, @@ -34012,10 +33964,8 @@ fn floatSub( if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -34029,31 +33979,32 @@ fn floatSubScalar( rhs: Value, float_type: Type, ) !Value { + const mod = sema.mod; const target = sema.mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(sema.arena, lhs_val - rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(sema.arena, lhs_val - rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(sema.arena, lhs_val - rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(sema.arena, lhs_val - rhs_val); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(sema.arena, lhs_val - rhs_val); }, else => unreachable, @@ -34071,10 +34022,8 @@ fn intSubWithOverflow( const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; @@ -34106,7 +34055,7 @@ fn intSubWithOverflowScalar( ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - const wrapped_result = try Value.fromBigInt(sema.arena, result_bigint.toConst()); + const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ .overflow_bit = Value.boolToInt(overflowed), .wrapped_result = wrapped_result, @@ -34126,8 +34075,7 @@ fn floatToInt( const elem_ty = float_ty.childType(mod); const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(sema.mod, i, &buf); + const elem_val = try val.elemValue(sema.mod, i); scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -34168,9 +34116,9 @@ fn floatToIntScalar( float_ty: Type, int_ty: Type, ) CompileError!Value { - const Limb = std.math.big.Limb; + const mod = sema.mod; - const float = val.toFloat(f128); + const float = val.toFloat(f128, mod); if (std.math.isNan(float)) { return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{ int_ty.fmt(sema.mod), @@ -34185,11 +34133,7 @@ fn floatToIntScalar( var big_int = try float128IntPartToBigInt(sema.arena, float); defer big_int.deinit(); - const result_limbs = try sema.arena.dupe(Limb, big_int.toConst().limbs); - const result = if (!big_int.isPositive()) - try Value.Tag.int_big_negative.create(sema.arena, result_limbs) - else - try Value.Tag.int_big_positive.create(sema.arena, result_limbs); + const result = try mod.intValue_big(int_ty, big_int.toConst()); if (!(try sema.intFitsInType(result, int_ty, null))) { return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{ @@ -34209,8 +34153,8 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { + if (ty.ip_index == .comptime_int_type) return true; const mod = sema.mod; - const target = mod.getTarget(); switch (val.ip_index) { .undef, .zero, @@ -34218,103 +34162,26 @@ fn intFitsInType( .zero_u8, => return true, - .one, - .one_usize, - => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return switch (info.signedness) { - .signed => info.bits >= 2, - .unsigned => info.bits >= 1, - }; - }, - .ComptimeInt => return true, - else => unreachable, - }, - .none => switch (val.tag()) { - .zero => return true, - - .one => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return switch (info.signedness) { - .signed => info.bits >= 2, - .unsigned => info.bits >= 1, - }; - }, - .ComptimeInt => return true, - else => unreachable, - }, - - .lazy_align => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); - // If it is u16 or bigger we know the alignment fits without resolving it. - if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - .lazy_size => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); - // If it is u64 or bigger we know the size fits without resolving it. 
- if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - - .int_u64 => switch (ty.zigTypeTag(mod)) { - .Int => { - const x = val.castTag(.int_u64).?.data; - if (x == 0) return true; - const info = ty.intInfo(mod); - const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_i64 => switch (ty.zigTypeTag(mod)) { - .Int => { - const x = val.castTag(.int_i64).?.data; - if (x == 0) return true; - const info = ty.intInfo(mod); - if (info.signedness == .unsigned and x < 0) - return false; - var buffer: Value.BigIntSpace = undefined; - return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_big_positive => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, + .lazy_align => { + const info = ty.intInfo(mod); + const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); + // If it is u16 or bigger we know the alignment fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; }, - .int_big_negative => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, + .lazy_size => { + const info = ty.intInfo(mod); + const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); + // If it is u64 or bigger we know the size fits without resolving it. 
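Both lazy arms rely on the same counting rule: a nonnegative value `x` occupies `log2(x) + 1` bits, plus one sign bit when the destination is signed, and a wide enough destination lets the compiler skip resolving the lazy alignment or size at all. A standalone restatement, assuming `x` is already resolved to a `u64`:

```zig
const std = @import("std");

fn bitsNeeded(x: u64, signedness: std.builtin.Signedness) u64 {
    if (x == 0) return 0; // zero fits in any width, like the early returns above
    const lg = @as(u64, std.math.log2(x));
    return lg + 1 + @boolToInt(signedness == .signed);
}

test "bit counting matches two's complement capacity" {
    try std.testing.expectEqual(@as(u64, 8), bitsNeeded(255, .unsigned)); // fits u8
    try std.testing.expectEqual(@as(u64, 9), bitsNeeded(255, .signed)); // needs i9
    try std.testing.expectEqual(@as(u64, 17), bitsNeeded(1 << 16, .unsigned));
}
```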
+ if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; }, .the_only_possible_value => { @@ -34327,17 +34194,14 @@ fn intFitsInType( .decl_ref, .function, .variable, - => switch (ty.zigTypeTag(mod)) { - .Int => { - const info = ty.intInfo(mod); - const ptr_bits = target.ptrBitWidth(); - return switch (info.signedness) { - .signed => info.bits > ptr_bits, - .unsigned => info.bits >= ptr_bits, - }; - }, - .ComptimeInt => return true, - else => unreachable, + => { + const info = ty.intInfo(mod); + const target = mod.getTarget(); + const ptr_bits = target.ptrBitWidth(); + return switch (info.signedness) { + .signed => info.bits > ptr_bits, + .unsigned => info.bits >= ptr_bits, + }; }, .aggregate => { @@ -34354,22 +34218,22 @@ fn intFitsInType( else => unreachable, }, - else => @panic("TODO"), + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| { + const info = ty.intInfo(mod); + var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return big_int.fitsInTwosComp(info.signedness, info.bits); + }, + else => unreachable, + }, } } -fn intInRange( - sema: *Sema, - tag_ty: Type, - int_val: Value, - end: usize, -) !bool { +fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { + const mod = sema.mod; if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false; - var end_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = end, - }; - const end_val = Value.initPayload(&end_payload.base); + const end_val = try mod.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; } @@ -34426,10 +34290,8 @@ fn intAddWithOverflow( const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; @@ -34461,7 +34323,7 @@ fn intAddWithOverflowScalar( ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - const result = try Value.fromBigInt(sema.arena, result_bigint.toConst()); + const result = try mod.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ .overflow_bit = Value.boolToInt(overflowed), .wrapped_result = result, @@ -34483,10 +34345,8 @@ fn compareAll( if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < ty.vectorLen(mod)) : (i += 1) { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) { return false; } @@ -34532,10 +34392,8 @@ fn compareVector( assert(ty.zigTypeTag(mod) == .Vector); const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); scalar.* = Value.makeBool(res_bool); } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index ee9a8abf0f81..28212a164c86 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -41,8 +41,8 @@ pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void { return tv.val.hash(tv.ty, hasher, mod); } -pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value { - return tv.val.enumToInt(tv.ty, buffer); +pub fn enumToInt(tv: TypedValue, mod: *Module) Allocator.Error!Value { + return tv.val.enumToInt(tv.ty, mod); } const max_aggregate_items = 100; @@ -157,14 +157,8 @@ pub fn print( return writer.writeAll(" }"); }, - .zero => return writer.writeAll("0"), - .one => return writer.writeAll("1"), .the_only_possible_value => return writer.writeAll("0"), .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), - .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), - .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), - .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), .lazy_align => { const sub_ty = val.castTag(.lazy_align).?.data; const x = sub_ty.abiAlignment(mod); @@ -313,8 +307,9 @@ pub fn print( 
var i: u32 = 0; while (i < max_len) : (i += 1) { - var elem_buf: Value.ElemValueBuffer = undefined; - const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; if (elem_val.isUndef()) break :str; buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } @@ -330,10 +325,12 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - var buf: Value.ElemValueBuffer = undefined; + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; try print(.{ .ty = elem_ty, - .val = payload.ptr.elemValueBuffer(mod, i, &buf), + .val = elem_val, }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/Zir.zig b/src/Zir.zig index 8c03dfd060f0..34479cce5e81 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2120,6 +2120,7 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), + negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), void_value = @enumToInt(InternPool.Index.void_value), diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index ea7134c603e9..327e2c13e043 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3083,20 +3083,21 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, .Float => switch (ty.floatBits(func.target)) { - 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) }, - 32 => return WValue{ .float32 = val.toFloat(f32) }, - 64 => return WValue{ .float64 = val.toFloat(f64) }, + 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16, mod)) }, + 32 => return WValue{ .float32 = val.toFloat(f32, mod) }, + 64 => return WValue{ .float64 = val.toFloat(f64, mod) }, else => unreachable, }, - .Pointer => switch (val.ip_index) { - .null_value => return WValue{ .imm32 = 0 }, + .Pointer => return switch (val.ip_index) { + .null_value => WValue{ .imm32 = 0 }, .none => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, - .zero => return WValue{ .imm32 = 0 }, + .field_ptr, .elem_ptr, .opt_payload_ptr => func.lowerParentPtr(val, 0), else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| WValue{ .imm32 = @intCast(u32, int.storage.u64) }, + else => unreachable, + }, }, .Enum => { if (val.castTag(.enum_field_index)) |field_index| { @@ -3137,7 +3138,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
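`Value.zero` works in the error-union hunk below because an error union whose payload has no runtime bits is represented as just the error integer, with 0 reserved for "no error". A toy model of that encoding (illustrative only, not the compiler's actual lowering path):

```zig
const std = @import("std");

fn encode(eu: anyerror!void) u16 {
    if (eu) |_| return 0 else |err| return @errorToInt(err);
}

test "zero in the error slot means the payload is active" {
    try std.testing.expectEqual(@as(u16, 0), encode({}));
    try std.testing.expect(encode(error.Oops) != 0);
}
```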
const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.initTag(.zero); + const err_val = if (!is_pl) val else Value.zero; return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); @@ -3160,11 +3161,10 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { assert(struct_obj.layout == .Packed); var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; - var payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = std.mem.readIntLittle(u64, &buf), - }; - const int_val = Value.initPayload(&payload.base); + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); return func.lowerConstant(int_val, struct_obj.backing_int_ty); }, .Vector => { @@ -4899,8 +4899,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { - var buf: Value.ElemValueBuffer = undefined; - const value = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); + const value = (try mask.elemValue(mod, index)).toSignedInt(mod); try func.emitWValue(result); @@ -4920,8 +4919,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lanes = std.mem.asBytes(operands[1..]); for (0..@intCast(usize, mask_len)) |index| { - var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); + const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); const base_index = if (mask_elem >= 0) @intCast(u8, @intCast(i64, elem_size) * mask_elem) else diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ee604afd0fa9..51c6bc79e648 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2757,11 +2757,8 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { dst_ty.fmt(self.bin_file.options.module.?), }); - var mask_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits), - }; - const mask_val = Value.initPayload(&mask_pl.base); + const elem_ty = src_ty.childType(mod); + const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); var splat_pl = Value.Payload.SubValue{ .base = .{ .tag = .repeated }, @@ -4906,18 +4903,6 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { defer arena.deinit(); const ExpectedContents = struct { - scalar: union { - i64: Value.Payload.I64, - big: struct { - limbs: [ - @max( - std.math.big.int.Managed.default_capacity, - std.math.big.int.calcTwosCompLimbCount(128), - ) - ]std.math.big.Limb, - pl: Value.Payload.BigInt, - }, - }, repeated: Value.Payload.SubValue, }; var stack align(@alignOf(ExpectedContents)) = @@ -11429,8 +11414,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; const tag_val = Value.initPayload(&tag_pl.base); - var tag_int_pl: Value.Payload.U64 = undefined; - const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl); + const tag_int_val = try tag_val.enumToInt(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < 
layout.payload_align) @intCast(i32, layout.payload_size) diff --git a/src/codegen.zig b/src/codegen.zig index 5f5a3f66be21..9c9868892f4c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -214,15 +214,15 @@ pub fn generateSymbol( }, .Float => { switch (typed_value.ty.floatBits(target)) { - 16 => writeFloat(f16, typed_value.val.toFloat(f16), target, endian, try code.addManyAsArray(2)), - 32 => writeFloat(f32, typed_value.val.toFloat(f32), target, endian, try code.addManyAsArray(4)), - 64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)), + 16 => writeFloat(f16, typed_value.val.toFloat(f16, mod), target, endian, try code.addManyAsArray(2)), + 32 => writeFloat(f32, typed_value.val.toFloat(f32, mod), target, endian, try code.addManyAsArray(4)), + 64 => writeFloat(f64, typed_value.val.toFloat(f64, mod), target, endian, try code.addManyAsArray(8)), 80 => { - writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try code.addManyAsArray(10)); + writeFloat(f80, typed_value.val.toFloat(f80, mod), target, endian, try code.addManyAsArray(10)); const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0, abi_size - 10); }, - 128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)), + 128 => writeFloat(f128, typed_value.val.toFloat(f128, mod), target, endian, try code.addManyAsArray(16)), else => unreachable, } return Result.ok; @@ -328,20 +328,6 @@ pub fn generateSymbol( return Result.ok; }, .none => switch (typed_value.val.tag()) { - .zero, .one, .int_u64, .int_big_positive => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( bin_file, src_loc, @@ -399,7 +385,23 @@ pub fn generateSymbol( ), }, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .int => { + switch (target.ptrBitWidth()) { + 32 => { + const x = typed_value.val.toUnsignedInt(mod); + mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); + }, + 64 => { + const x = typed_value.val.toUnsignedInt(mod); + mem.writeInt(u64, try code.addManyAsArray(8), x, endian); + }, + else => unreachable, + } + return Result.ok; + }, + else => unreachable, + }, }, .Int => { const info = typed_value.ty.intInfo(mod); @@ -449,8 +451,7 @@ pub fn generateSymbol( return Result.ok; }, .Enum => { - var int_buffer: Value.Payload.U64 = undefined; - const int_val = typed_value.enumToInt(&int_buffer); + const int_val = try typed_value.enumToInt(mod); const info = typed_value.ty.intInfo(mod); if (info.bits <= 8) { @@ -674,7 +675,7 @@ pub fn generateSymbol( const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; + const err_val = if (is_payload) Value.zero else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, .val = err_val, @@ -689,7 +690,7 @@ pub fn generateSymbol( if (error_align > payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.initTag(.zero) else typed_value.val, + .val 
= if (is_payload) Value.zero else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -721,7 +722,7 @@ pub fn generateSymbol( const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.initTag(.zero) else typed_value.val, + .val = if (is_payload) Value.zero else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -961,13 +962,9 @@ fn lowerDeclRef( } // generate length - var slice_len: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = typed_value.val.sliceLen(mod), - }; switch (try generateSymbol(bin_file, src_loc, .{ .ty = Type.usize, - .val = Value.initPayload(&slice_len.base), + .val = try mod.intValue(Type.usize, typed_value.val.sliceLen(mod)), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -1196,13 +1193,13 @@ pub fn genTypedValue( .null_value => { return GenResult.mcv(.{ .immediate = 0 }); }, - .none => switch (typed_value.val.tag()) { - .int_u64 => { + .none => {}, + else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .int => { return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, }, - else => {}, }, }, .Int => { @@ -1283,7 +1280,7 @@ pub fn genTypedValue( if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + const err_val = if (!is_pl) typed_value.val else Value.zero; return genTypedValue(bin_file, src_loc, .{ .ty = error_type, .val = err_val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 039c75de6720..9443c2298a72 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -568,11 +568,7 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), val.slicePtr(), .Initializer); - var len_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = val.sliceLen(mod), - }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); if (location == .StaticInitializer) { return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); @@ -596,11 +592,17 @@ pub const DeclGen = struct { if (need_typecast) try writer.writeByte(')'); } - // Renders a "parent" pointer by recursing to the root decl/variable - // that its contents are defined with respect to. - // - // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr - fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { + /// Renders a "parent" pointer by recursing to the root decl/variable + /// that its contents are defined with respect to. 
+ /// + /// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr + fn renderParentPtr( + dg: *DeclGen, + writer: anytype, + ptr_val: Value, + ptr_ty: Type, + location: ValueRenderLocation, + ) error{ OutOfMemory, AnalysisFail }!void { const mod = dg.module; if (!ptr_ty.isSlice(mod)) { @@ -608,8 +610,11 @@ pub const DeclGen = struct { try dg.renderType(writer, ptr_ty); try writer.writeByte(')'); } + if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .int => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}), + else => unreachable, + }; switch (ptr_val.tag()) { - .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}), .decl_ref_mut, .decl_ref, .variable => { const decl_index = switch (ptr_val.tag()) { .decl_ref => ptr_val.castTag(.decl_ref).?.data, @@ -661,11 +666,7 @@ pub const DeclGen = struct { u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try dg.renderType(writer, u8_ptr_ty); @@ -891,7 +892,7 @@ pub const DeclGen = struct { }, .Array, .Vector => { const ai = ty.arrayInfo(mod); - if (ai.elem_type.eql(Type.u8, dg.module)) { + if (ai.elem_type.eql(Type.u8, mod)) { var literal = stringLiteral(writer); try literal.start(); const c_len = ty.arrayLenIncludingSentinel(mod); @@ -949,7 +950,7 @@ pub const DeclGen = struct { }, .Float => { const bits = ty.floatBits(target); - const f128_val = val.toFloat(f128); + const f128_val = val.toFloat(f128, mod); // All unsigned ints matching float types are pre-allocated. 
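The pre-allocated unsigned types mentioned just below exist because every float constant is emitted through its bit pattern; `u16` through `u128` mirror `f16` through `f128`. The correspondence is a plain bitcast:

```zig
const std = @import("std");

test "a float constant round-trips through its unsigned bit pattern" {
    const f: f32 = -1.5;
    const repr = @bitCast(u32, f); // emitted as an integer literal in the C output
    try std.testing.expectEqual(@as(u32, 0xbfc0_0000), repr);
    try std.testing.expectEqual(f, @bitCast(f32, repr));
}
```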
const repr_ty = mod.intType(.unsigned, bits) catch unreachable; @@ -963,21 +964,15 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80))), + 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), + 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), + 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), + 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), 128 => repr_val_big.set(@bitCast(u128, f128_val)), else => unreachable, } - var repr_val_pl = Value.Payload.BigInt{ - .base = .{ - .tag = if (repr_val_big.positive) .int_big_positive else .int_big_negative, - }, - .data = repr_val_big.limbs[0..repr_val_big.len], - }; - const repr_val = Value.initPayload(&repr_val_pl.base); + const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -988,10 +983,10 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16)}), - 32 => try writer.print("{x}", .{val.toFloat(f32)}), - 64 => try writer.print("{x}", .{val.toFloat(f64)}), - 80 => try writer.print("{x}", .{val.toFloat(f80)}), + 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), 128 => try writer.print("{x}", .{f128_val}), else => unreachable, } @@ -1031,10 +1026,10 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
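NaN needs the string detour in the hunk below because it has no portable literal and never compares equal to itself; what must survive lowering is the bit pattern, not a numeric value. A small demonstration of why:

```zig
const std = @import("std");

test "NaN motivates passing raw bits" {
    const nan = std.math.nan(f32);
    try std.testing.expect(nan != nan); // no literal could round-trip through ==
    try std.testing.expect(std.math.isNan(nan));
    // NaN-ness survives the widen-to-f128 detour used by the backend
    try std.testing.expect(std.math.isNan(@floatCast(f32, @as(f128, nan))));
}
```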
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80))}), + 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), + 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), + 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), + 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), else => unreachable, }; @@ -1060,19 +1055,6 @@ pub const DeclGen = struct { try writer.writeAll(")NULL)"); }, .none => switch (val.tag()) { - .zero => if (ty.isSlice(mod)) { - var slice_pl = Value.Payload.Slice{ - .base = .{ .tag = .slice }, - .data = .{ .ptr = val, .len = Value.undef }, - }; - const slice_val = Value.initPayload(&slice_pl.base); - - return dg.renderValue(writer, ty, slice_val, location); - } else { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - try writer.writeAll(")NULL)"); - }, .variable => { const decl = val.castTag(.variable).?.data.owner_decl; return dg.renderDeclValue(writer, ty, val, decl, location); @@ -1101,7 +1083,7 @@ pub const DeclGen = struct { const extern_fn = val.castTag(.extern_fn).?.data; try dg.renderDeclName(writer, extern_fn.owner_decl, 0); }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + .lazy_align, .lazy_size => { try writer.writeAll("(("); try dg.renderType(writer, ty); return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); @@ -1116,7 +1098,14 @@ pub const DeclGen = struct { else => unreachable, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + else => unreachable, + }, }, .Array, .Vector => { if (location == .FunctionArgument) { @@ -1155,7 +1144,7 @@ pub const DeclGen = struct { .bytes => val.castTag(.bytes).?.data, .str_lit => bytes: { const str_lit = val.castTag(.str_lit).?.data; - break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + break :bytes mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; }, else => unreachable, }; @@ -1170,21 +1159,18 @@ pub const DeclGen = struct { else => {}, } // Fall back to generic implementation. 
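For context on the generic array fallback below: each `u8` element either passes through as a printable character or is hex-escaped, which is also why undefined bytes get a deterministic `undefPattern` stand-in. A self-contained analog of that per-element emission, with escaping rules simplified relative to the real `stringLiteral` helper:

```zig
const std = @import("std");

// Emit bytes as a C string literal, hex-escaping anything non-printable
// (and the quote/backslash characters themselves).
fn writeCStringLiteral(writer: anytype, bytes: []const u8) !void {
    try writer.writeByte('"');
    for (bytes) |b| switch (b) {
        ' '...'!', '#'...'[', ']'...'~' => try writer.writeByte(b),
        else => try writer.print("\\x{x:0>2}", .{b}),
    };
    try writer.writeByte('"');
}

test "printable bytes pass through, the rest are hex-escaped" {
    var buf: [64]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try writeCStringLiteral(fbs.writer(), "hi\x00");
    try std.testing.expectEqualStrings("\"hi\\x00\"", fbs.getWritten());
}
```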
- var arena = std.heap.ArenaAllocator.init(dg.gpa); - defer arena.deinit(); - const arena_allocator = arena.allocator(); // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal const max_string_initializer_len = 65535; const ai = ty.arrayInfo(mod); - if (ai.elem_type.eql(Type.u8, dg.module)) { + if (ai.elem_type.eql(Type.u8, mod)) { if (ai.len <= max_string_initializer_len) { var literal = stringLiteral(writer); try literal.start(); var index: usize = 0; while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try literal.writeChar(elem_val_u8); } @@ -1198,7 +1184,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try writer.print("'\\x{x}'", .{elem_val_u8}); } @@ -1213,7 +1199,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); } if (ai.sentinel) |s| { @@ -1361,8 +1347,7 @@ pub const DeclGen = struct { const bits = Type.smallestUnsignedBits(int_info.bits - 1); const bit_offset_ty = try mod.intType(.unsigned, bits); - var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + var bit_offset: u64 = 0; var eff_num_fields: usize = 0; for (0..field_vals.len) |field_i| { @@ -1394,12 +1379,13 @@ pub const DeclGen = struct { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; - if (bit_offset_val_pl.data != 0) { + if (bit_offset != 0) { try writer.writeAll("zig_shl_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); } else { @@ -1409,7 +1395,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset_val_pl.data += field_ty.bitSize(mod); + bit_offset += field_ty.bitSize(mod); needs_closing_paren = true; eff_index += 1; } @@ -1427,15 +1413,16 @@ pub const DeclGen = struct { try dg.renderType(writer, ty); try writer.writeByte(')'); - if (bit_offset_val_pl.data != 0) { + if (bit_offset != 0) { try dg.renderValue(writer, field_ty, field_val, .Other); try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); } else { try dg.renderValue(writer, field_ty, field_val, .Other); } - bit_offset_val_pl.data += field_ty.bitSize(mod); + bit_offset += field_ty.bitSize(mod); empty 
= false; } try writer.writeByte(')'); @@ -1451,7 +1438,7 @@ pub const DeclGen = struct { try writer.writeByte(')'); } - const field_i = ty.unionTagFieldIndex(union_obj.tag, dg.module).?; + const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?; const field_ty = ty.unionFields().values()[field_i].ty; const field_name = ty.unionFields().keys()[field_i]; if (ty.containerLayout() == .Packed) { @@ -1951,10 +1938,10 @@ pub const DeclGen = struct { if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); - var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits }; + const bits_ty = if (is_big) Type.u16 else Type.u8; try writer.print(", {}", .{try dg.fmtIntLiteral( - if (is_big) Type.u16 else Type.u8, - Value.initPayload(&bits_pl.base), + bits_ty, + try mod.intValue(bits_ty, int_info.bits), .FunctionArgument, )}); } @@ -2495,8 +2482,7 @@ pub fn genErrDecls(o: *Object) !void { for (mod.error_name_list.items, 0..) |name, value| { if (value != 0) try writer.writeByte(','); - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, name.len); try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{ fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other), @@ -2548,8 +2534,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { }; const tag_val = Value.initPayload(&tag_pl.base); - var int_pl: Value.Payload.U64 = undefined; - const int_val = tag_val.enumToInt(enum_ty, &int_pl); + const int_val = try tag_val.enumToInt(enum_ty, mod); const name_ty = try mod.arrayType(.{ .len = name.len, @@ -2560,8 +2545,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; const name_val = Value.initPayload(&name_pl.base); - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, name.len); try w.print(" case {}: {{\n static ", .{ try o.dg.fmtIntLiteral(enum_ty, int_val, .Other), @@ -3396,12 +3380,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const host_ty = try mod.intType(.unsigned, host_bits); const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); - - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = ptr_info.bit_offset, - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset); const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod))); @@ -3563,14 +3542,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try v.elem(f, writer); } else switch (dest_int_info.signedness) { .unsigned => { - var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa); - defer arena.deinit(); - - const ExpectedContents = union { u: Value.Payload.U64, i: Value.Payload.I64 }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - - const mask_val = try inst_scalar_ty.maxInt(stack.get(), mod); + const mask_val = try inst_scalar_ty.maxIntScalar(mod); try writer.writeAll("zig_and_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); try writer.writeByte('('); @@ -3581,11 +3553,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { .signed => { 
const c_bits = toCIntBits(scalar_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - var shift_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = c_bits - dest_bits, - }; - const shift_val = Value.initPayload(&shift_pl.base); + const shift_val = try mod.intValue(Type.u8, c_bits - dest_bits); try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); @@ -3705,12 +3673,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const host_ty = try mod.intType(.unsigned, host_bits); const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); - - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = ptr_info.bit_offset, - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset); const src_bits = src_ty.bitSize(mod); @@ -3725,11 +3688,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try mask.shiftLeft(&mask, ptr_info.bit_offset); try mask.bitNotWrap(&mask, .unsigned, host_bits); - var mask_pl = Value.Payload.BigInt{ - .base = .{ .tag = .int_big_positive }, - .data = mask.limbs[0..mask.len()], - }; - const mask_val = Value.initPayload(&mask_pl.base); + const mask_val = try mod.intValue_big(host_ty, mask.toConst()); try f.writeCValueDeref(writer, ptr_val); try v.elem(f, writer); @@ -5356,11 +5315,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5412,11 +5367,7 @@ fn fieldPtr( u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5466,11 +5417,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = struct_obj.packedFieldBitOffset(mod, extra.field_index), - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); const field_int_signedness = if (inst_ty.isAbiInt(mod)) inst_ty.intInfo(mod).signedness @@ -5492,13 +5440,13 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); } - if (bit_offset_val_pl.data > 0) { + if (bit_offset > 0) { try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); } try f.writeCValue(writer, struct_byval, .Other); - if (bit_offset_val_pl.data > 0) { + if (bit_offset > 0) { try 
writer.writeAll(", "); try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); @@ -5854,9 +5802,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); - const array_len = array_ty.arrayLen(mod); - var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod)); try f.writeCValueMember(writer, local, .{ .identifier = "len" }); try writer.print(" = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)}); @@ -6632,26 +6578,17 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands for (0..extra.mask_len) |index| { - var dst_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @intCast(u64, index), - }; - try f.writeCValue(writer, local, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&dst_pl.base), .Other); + try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, index), .Other); try writer.writeAll("] = "); - var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); - var src_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @intCast(u64, mask_elem ^ mask_elem >> 63), - }; + const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); + const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63)); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&src_pl.base), .Other); + try f.object.dg.renderValue(writer, Type.usize, src_val, .Other); try writer.writeAll("];\n"); } @@ -6730,8 +6667,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { defer arena.deinit(); const ExpectedContents = union { - u: Value.Payload.U64, - i: Value.Payload.I64, f16: Value.Payload.Float_16, f32: Value.Payload.Float_32, f64: Value.Payload.Float_64, @@ -6746,13 +6681,13 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .And => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, else => switch (scalar_ty.intInfo(mod).signedness) { - .unsigned => try scalar_ty.maxInt(stack.get(), mod), + .unsigned => try scalar_ty.maxIntScalar(mod), .signed => Value.negative_one, }, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, - .Int => try scalar_ty.maxInt(stack.get(), mod), + .Int => try scalar_ty.maxIntScalar(mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, @@ -6879,8 +6814,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + var bit_offset: u64 = 0; var empty = true; for (0..elements.len) |field_i| { @@ -6925,12 +6859,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try 
f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits); try writer.writeByte(')'); if (!empty) try writer.writeByte(')'); - bit_offset_val_pl.data += field_ty.bitSize(mod); + bit_offset += field_ty.bitSize(mod); empty = false; } @@ -6976,8 +6911,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { }; const tag_val = Value.initPayload(&tag_pl.base); - var int_pl: Value.Payload.U64 = undefined; - const int_val = tag_val.enumToInt(tag_ty, &int_pl); + const int_val = try tag_val.enumToInt(tag_ty, mod); const a = try Assignment.start(f, writer, tag_ty); try f.writeCValueMember(writer, local, .{ .identifier = "tag" }); @@ -7640,10 +7574,6 @@ fn formatIntLiteral( c_limb_int_info.signedness = .unsigned; c_limb_cty = c_limb_info.cty; } - var c_limb_val_pl = Value.Payload.BigInt{ - .base = .{ .tag = if (c_limb_mut.positive) .int_big_positive else .int_big_negative }, - .data = c_limb_mut.limbs[0..c_limb_mut.len], - }; if (limb_offset > 0) try writer.writeAll(", "); try formatIntLiteral(.{ @@ -7651,7 +7581,7 @@ fn formatIntLiteral( .int_info = c_limb_int_info, .kind = data.kind, .cty = c_limb_cty, - .val = Value.initPayload(&c_limb_val_pl.base), + .val = try mod.intValue_big(Type.comptime_int, c_limb_mut.toConst()), }, fmt, options, writer); } } @@ -7750,7 +7680,7 @@ const Vectorize = struct { pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { const mod = f.object.dg.module; return if (ty.zigTypeTag(mod) == .Vector) index: { - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen(mod) }; + const len_val = try mod.intValue(Type.usize, ty.vectorLen(mod)); const local = try f.allocLocal(inst, Type.usize); @@ -7759,7 +7689,7 @@ const Vectorize = struct { try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); try f.writeCValue(writer, local, .Other); try writer.print(" < {d}; ", .{ - try f.fmtIntLiteral(Type.usize, Value.initPayload(&len_pl.base)), + try f.fmtIntLiteral(Type.usize, len_val), }); try f.writeCValue(writer, local, .Other); try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)}); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5ad3c2b8c537..9b62c5448da7 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -12,6 +12,7 @@ const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); const build_options = @import("build_options"); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); const TypedValue = @import("../TypedValue.zig"); const Air = @import("../Air.zig"); @@ -1535,8 +1536,7 @@ pub const Object = struct { defer gpa.free(field_name_z); buf_field_index.data = @intCast(u32, i); - var buf_u64: Value.Payload.U64 = undefined; - const field_int_val = field_index_val.enumToInt(ty, &buf_u64); + const field_int_val = try field_index_val.enumToInt(ty, mod); var bigint_space: Value.BigIntSpace = undefined; const bigint = field_int_val.toBigInt(&bigint_space, mod); @@ -3255,8 +3255,6 @@ pub const DeclGen = struct { const llvm_type = try dg.lowerType(tv.ty); return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); }, - // TODO this duplicates code with Pointer but they should share the handling - // of the tv.val.tag() and then Int should do extra constPtrToInt on top .Int => switch (tv.val.ip_index) { .none => switch (tv.val.tag()) 
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 5ad3c2b8c537..9b62c5448da7 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -12,6 +12,7 @@ const link = @import("../link.zig");
 const Compilation = @import("../Compilation.zig");
 const build_options = @import("build_options");
 const Module = @import("../Module.zig");
+const InternPool = @import("../InternPool.zig");
 const Package = @import("../Package.zig");
 const TypedValue = @import("../TypedValue.zig");
 const Air = @import("../Air.zig");
@@ -1535,8 +1536,7 @@ pub const Object = struct {
                 defer gpa.free(field_name_z);

                 buf_field_index.data = @intCast(u32, i);
-                var buf_u64: Value.Payload.U64 = undefined;
-                const field_int_val = field_index_val.enumToInt(ty, &buf_u64);
+                const field_int_val = try field_index_val.enumToInt(ty, mod);
                 var bigint_space: Value.BigIntSpace = undefined;
                 const bigint = field_int_val.toBigInt(&bigint_space, mod);
@@ -3255,8 +3255,6 @@ pub const DeclGen = struct {
                 const llvm_type = try dg.lowerType(tv.ty);
                 return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull();
             },
-            // TODO this duplicates code with Pointer but they should share the handling
-            // of the tv.val.tag() and then Int should do extra constPtrToInt on top
             .Int => switch (tv.val.ip_index) {
                 .none => switch (tv.val.tag()) {
                     .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index),
@@ -3277,8 +3275,7 @@ pub const DeclGen = struct {
                 },
             },
             .Enum => {
-                var int_buffer: Value.Payload.U64 = undefined;
-                const int_val = tv.enumToInt(&int_buffer);
+                const int_val = try tv.enumToInt(mod);

                 var bigint_space: Value.BigIntSpace = undefined;
                 const bigint = int_val.toBigInt(&bigint_space, mod);
@@ -3307,25 +3304,25 @@ pub const DeclGen = struct {
             const llvm_ty = try dg.lowerType(tv.ty);
             switch (tv.ty.floatBits(target)) {
                 16 => {
-                    const repr = @bitCast(u16, tv.val.toFloat(f16));
+                    const repr = @bitCast(u16, tv.val.toFloat(f16, mod));
                     const llvm_i16 = dg.context.intType(16);
                     const int = llvm_i16.constInt(repr, .False);
                     return int.constBitCast(llvm_ty);
                 },
                 32 => {
-                    const repr = @bitCast(u32, tv.val.toFloat(f32));
+                    const repr = @bitCast(u32, tv.val.toFloat(f32, mod));
                     const llvm_i32 = dg.context.intType(32);
                     const int = llvm_i32.constInt(repr, .False);
                     return int.constBitCast(llvm_ty);
                 },
                 64 => {
-                    const repr = @bitCast(u64, tv.val.toFloat(f64));
+                    const repr = @bitCast(u64, tv.val.toFloat(f64, mod));
                     const llvm_i64 = dg.context.intType(64);
                     const int = llvm_i64.constInt(repr, .False);
                     return int.constBitCast(llvm_ty);
                 },
                 80 => {
-                    const float = tv.val.toFloat(f80);
+                    const float = tv.val.toFloat(f80, mod);
                     const repr = std.math.break_f80(float);
                     const llvm_i80 = dg.context.intType(80);
                     var x = llvm_i80.constInt(repr.exp, .False);
@@ -3338,7 +3335,7 @@ pub const DeclGen = struct {
                     }
                 },
                 128 => {
-                    var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128));
+                    var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod));
                     // LLVM seems to require that the lower half of the f128 be placed first
                     // in the buffer.
                     if (native_endian == .Big) {
@@ -3388,7 +3385,7 @@ pub const DeclGen = struct {
                 };
                 return dg.context.constStruct(&fields, fields.len, .False);
             },
-            .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
+            .lazy_align, .lazy_size => {
                 const llvm_usize = try dg.lowerType(Type.usize);
                 const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False);
                 return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
@@ -3396,10 +3393,6 @@ pub const DeclGen = struct {
             .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
                 return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0);
             },
-            .zero => {
-                const llvm_type = try dg.lowerType(tv.ty);
-                return llvm_type.constNull();
-            },
             .opt_payload => {
                 const payload = tv.val.castTag(.opt_payload).?.data;
                 return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0);
@@ -3408,7 +3401,10 @@ pub const DeclGen = struct {
                 tv.ty.fmtDebug(), tag,
             }),
             },
-            else => unreachable,
+            else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
+                .int => |int| return lowerIntAsPtr(dg, int),
+                else => unreachable,
+            },
         },
         .Array => switch (tv.val.tag()) {
             .bytes => {
@@ -3592,7 +3588,7 @@ pub const DeclGen = struct {
             if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
                 // We use the error type directly as the type.
-                const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
+                const err_val = if (!is_pl) tv.val else Value.zero;
                 return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val });
             }
@@ -3600,7 +3596,7 @@ pub const DeclGen = struct {
             const error_align = Type.anyerror.abiAlignment(mod);
             const llvm_error_value = try dg.lowerValue(.{
                 .ty = Type.anyerror,
-                .val = if (is_pl) Value.initTag(.zero) else tv.val,
+                .val = if (is_pl) Value.zero else tv.val,
             });
             const llvm_payload_value = try dg.lowerValue(.{
                 .ty = payload_type,
@@ -3882,14 +3878,9 @@ pub const DeclGen = struct {
                     const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
                     defer dg.gpa.free(llvm_elems);
                     for (llvm_elems, 0..) |*elem, i| {
-                        var byte_payload: Value.Payload.U64 = .{
-                            .base = .{ .tag = .int_u64 },
-                            .data = bytes[i],
-                        };
-
                         elem.* = try dg.lowerValue(.{
                             .ty = elem_ty,
-                            .val = Value.initPayload(&byte_payload.base),
+                            .val = try mod.intValue(elem_ty, bytes[i]),
                         });
                     }
                     return llvm.constVector(
@@ -3940,14 +3931,9 @@ pub const DeclGen = struct {
                     const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
                     defer dg.gpa.free(llvm_elems);
                     for (llvm_elems, 0..) |*elem, i| {
-                        var byte_payload: Value.Payload.U64 = .{
-                            .base = .{ .tag = .int_u64 },
-                            .data = bytes[i],
-                        };
-
                         elem.* = try dg.lowerValue(.{
                             .ty = elem_ty,
-                            .val = Value.initPayload(&byte_payload.base),
+                            .val = try mod.intValue(elem_ty, bytes[i]),
                         });
                     }
                     return llvm.constVector(
@@ -3974,6 +3960,13 @@ pub const DeclGen = struct {
         }
     }

+    fn lowerIntAsPtr(dg: *DeclGen, int: InternPool.Key.Int) *llvm.Value {
+        var bigint_space: Value.BigIntSpace = undefined;
+        const bigint = int.storage.toBigInt(&bigint_space);
+        const llvm_int = lowerBigInt(dg, Type.usize, bigint);
+        return llvm_int.constIntToPtr(dg.context.pointerType(0));
+    }
+
     fn lowerBigInt(dg: *DeclGen, ty: Type, bigint: std.math.big.int.Const) *llvm.Value {
         const mod = dg.module;
         const int_info = ty.intInfo(mod);
@@ -4018,6 +4011,10 @@ pub const DeclGen = struct {
     fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
         const mod = dg.module;
         const target = mod.getTarget();
+        if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+            .int => |int| return lowerIntAsPtr(dg, int),
+            else => unreachable,
+        };
         switch (ptr_val.tag()) {
             .decl_ref_mut => {
                 const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
@@ -4031,18 +4028,6 @@ pub const DeclGen = struct {
                 const decl = ptr_val.castTag(.variable).?.data.owner_decl;
                 return dg.lowerParentPtrDecl(ptr_val, decl);
             },
-            .int_i64 => {
-                const int = ptr_val.castTag(.int_i64).?.data;
-                const llvm_usize = try dg.lowerType(Type.usize);
-                const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False);
-                return llvm_int.constIntToPtr(dg.context.pointerType(0));
-            },
-            .int_u64 => {
-                const int = ptr_val.castTag(.int_u64).?.data;
-                const llvm_usize = try dg.lowerType(Type.usize);
-                const llvm_int = llvm_usize.constInt(int, .False);
-                return llvm_int.constIntToPtr(dg.context.pointerType(0));
-            },
             .field_ptr => {
                 const field_ptr = ptr_val.castTag(.field_ptr).?.data;
                 const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned);
@@ -4185,10 +4170,6 @@ pub const DeclGen = struct {
         if (tv.ty.isSlice(mod)) {
             var buf: Type.SlicePtrFieldTypeBuffer = undefined;
             const ptr_ty = tv.ty.slicePtrFieldType(&buf, mod);
-            var slice_len: Value.Payload.U64 = .{
-                .base = .{ .tag = .int_u64 },
-                .data = tv.val.sliceLen(mod),
-            };
             const fields: [2]*llvm.Value = .{
                 try self.lowerValue(.{
                     .ty = ptr_ty,
@@ -4196,7 +4177,7 @@ pub const DeclGen = struct {
                 }),
                 try self.lowerValue(.{
                     .ty = Type.usize,
-                    .val = Value.initPayload(&slice_len.base),
+                    .val = try mod.intValue(Type.usize, tv.val.sliceLen(mod)),
                 }),
             };
             return self.context.constStruct(&fields, fields.len, .False);
@@ -8507,8 +8488,7 @@ pub const FuncGen = struct {
         const dest_slice = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.typeOf(bin_op.lhs);
         const elem_ty = self.typeOf(bin_op.rhs);
-        const module = self.dg.module;
-        const target = module.getTarget();
+        const target = mod.getTarget();
         const dest_ptr_align = ptr_ty.ptrAlignment(mod);
         const u8_llvm_ty = self.context.intType(8);
         const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
@@ -8526,7 +8506,7 @@ pub const FuncGen = struct {
             const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);

             _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
-            if (safety and module.comp.bin_file.options.valgrind) {
+            if (safety and mod.comp.bin_file.options.valgrind) {
                 self.valgrindMarkUndef(dest_ptr, len);
             }
             return null;
@@ -8536,8 +8516,7 @@ pub const FuncGen = struct {
             // repeating byte pattern, for example, `@as(u64, 0)` has a
             // repeating byte pattern of 0 bytes. In such case, the memset
             // intrinsic can be used.
-            var value_buffer: Value.Payload.U64 = undefined;
-            if (try elem_val.hasRepeatedByteRepr(elem_ty, module, &value_buffer)) |byte_val| {
+            if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| {
                 const fill_byte = try self.resolveValue(.{
                     .ty = Type.u8,
                     .val = byte_val,
@@ -8829,16 +8808,10 @@ pub const FuncGen = struct {

         for (names) |name| {
             const err_int = mod.global_error_set.get(name).?;
-            const this_tag_int_value = int: {
-                var tag_val_payload: Value.Payload.U64 = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = err_int,
-                };
-                break :int try self.dg.lowerValue(.{
-                    .ty = Type.err_int,
-                    .val = Value.initPayload(&tag_val_payload.base),
-                });
-            };
+            const this_tag_int_value = try self.dg.lowerValue(.{
+                .ty = Type.err_int,
+                .val = try mod.intValue(Type.err_int, err_int),
+            });
             switch_instr.addCase(this_tag_int_value, valid_block);
         }
         self.builder.positionBuilderAtEnd(valid_block);
@@ -9122,8 +9095,7 @@ pub const FuncGen = struct {
         const llvm_i32 = self.context.intType(32);

         for (values, 0..) |*val, i| {
-            var buf: Value.ElemValueBuffer = undefined;
-            const elem = mask.elemValueBuffer(mod, i, &buf);
+            const elem = try mask.elemValue(mod, i);
             if (elem.isUndef()) {
                 val.* = llvm_i32.getUndef();
             } else {
@@ -9457,8 +9429,7 @@ pub const FuncGen = struct {
                 .data = @intCast(u32, enum_field_index),
             };
             const tag_val = Value.initPayload(&tag_val_payload.base);
-            var int_payload: Value.Payload.U64 = undefined;
-            const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload);
+            const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
             break :blk tag_int_val.toUnsignedInt(mod);
         };
         if (layout.payload_size == 0) {
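Before moving on to the SPIR-V backend, note that the llvm.zig hunks above all funnel into the same two-level dispatch: first on val.ip_index, then on either the legacy payload tag or the InternPool key. A schematic sketch of that shape, handling only the .int key as the hunks above do (lowerSmallInt is a hypothetical stand-in for the backend-specific handling):

    switch (val.ip_index) {
        // Not yet migrated: the value still carries a legacy payload tag.
        .none => switch (val.tag()) {
            else => unreachable, // legacy cases elided in this sketch
        },
        // Migrated: look the value up in the InternPool by its index.
        else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
            .int => |int| switch (int.storage) {
                inline .u64, .i64 => |x| return lowerSmallInt(dg, x),
                .big_int => |big| return lowerBigInt(dg, Type.usize, big),
            },
            else => unreachable,
        },
    }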
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index e3b5d24ed9ff..32e0c13c376f 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -555,15 +555,15 @@ pub const DeclGen = struct {
                 // TODO: Swap endianess if the compiler is big endian.
                 switch (ty.floatBits(target)) {
                     16 => {
-                        const float_bits = val.toFloat(f16);
+                        const float_bits = val.toFloat(f16, mod);
                         try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
                     },
                     32 => {
-                        const float_bits = val.toFloat(f32);
+                        const float_bits = val.toFloat(f32, mod);
                         try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
                     },
                     64 => {
-                        const float_bits = val.toFloat(f64);
+                        const float_bits = val.toFloat(f64, mod);
                         try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]);
                     },
                     else => unreachable,
@@ -584,7 +584,7 @@ pub const DeclGen = struct {
                 // TODO: Properly lower function pointers. For now we are going to hack around it and
                 // just generate an empty pointer. Function pointers are represented by usize for now,
                 // though.
-                try self.addInt(Type.usize, Value.initTag(.zero));
+                try self.addInt(Type.usize, Value.zero);
                 // TODO: Add dependency
                 return;
             },
@@ -743,8 +743,7 @@ pub const DeclGen = struct {
                 try self.addUndef(padding);
             },
             .Enum => {
-                var int_val_buffer: Value.Payload.U64 = undefined;
-                const int_val = val.enumToInt(ty, &int_val_buffer);
+                const int_val = try val.enumToInt(ty, mod);

                 const int_ty = ty.intTagType();
@@ -787,22 +786,24 @@ pub const DeclGen = struct {
                 try self.addUndef(layout.padding);
             },
-            .ErrorSet => switch (val.tag()) {
-                .@"error" => {
-                    const err_name = val.castTag(.@"error").?.data.name;
-                    const kv = try dg.module.getErrorValue(err_name);
-                    try self.addConstInt(u16, @intCast(u16, kv.value));
+            .ErrorSet => switch (val.ip_index) {
+                .none => switch (val.tag()) {
+                    .@"error" => {
+                        const err_name = val.castTag(.@"error").?.data.name;
+                        const kv = try dg.module.getErrorValue(err_name);
+                        try self.addConstInt(u16, @intCast(u16, kv.value));
+                    },
+                    else => unreachable,
                 },
-                .zero => {
-                    // Unactivated error set.
-                    try self.addConstInt(u16, 0);
+                else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                    .int => |int| try self.addConstInt(u16, @intCast(u16, int.storage.u64)),
+                    else => unreachable,
                 },
-                else => unreachable,
             },
             .ErrorUnion => {
                 const payload_ty = ty.errorUnionPayload();
                 const is_pl = val.errorUnionIsPayload();
-                const error_val = if (!is_pl) val else Value.initTag(.zero);
+                const error_val = if (!is_pl) val else Value.zero;

                 const eu_layout = dg.errorUnionLayout(payload_ty);
                 if (!eu_layout.payload_has_bits) {
@@ -993,9 +994,9 @@ pub const DeclGen = struct {
                 .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool(mod))),
             },
             .Float => return switch (ty.floatBits(target)) {
-                16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16) } } }),
-                32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32) } } }),
-                64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64) } } }),
+                16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16, mod) } } }),
+                32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32, mod) } } }),
+                64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64, mod) } } }),
                 80, 128 => unreachable, // TODO
                 else => unreachable,
             },
@@ -1531,6 +1532,7 @@ pub const DeclGen = struct {
     }

     fn genDecl(self: *DeclGen) !void {
+        if (true) @panic("TODO: update SPIR-V backend for InternPool changes");
         const mod = self.module;
         const decl = mod.declPtr(self.decl_index);
         const spv_decl_index = try self.resolveDecl(self.decl_index);
@@ -2087,8 +2089,7 @@ pub const DeclGen = struct {

         var i: usize = 0;
         while (i < mask_len) : (i += 1) {
-            var buf: Value.ElemValueBuffer = undefined;
-            const elem = mask.elemValueBuffer(self.module, i, &buf);
+            const elem = try mask.elemValue(self.module, i);
             if (elem.isUndef()) {
                 self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
             } else {
@@ -3146,9 +3147,8 @@ pub const DeclGen = struct {
         const int_val = switch (cond_ty.zigTypeTag(mod)) {
             .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
             .Enum => blk: {
-                var int_buffer: Value.Payload.U64 = undefined;
                 // TODO: figure out of cond_ty is correct (something with enum literals)
-                break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(mod); // TODO: composite integer constants
+                break :blk (try value.enumToInt(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
             },
             else => unreachable,
         };
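The .ErrorSet hunk above shows the same pattern from the consumer side: an interned error value no longer has a dedicated tag and is read back as a plain integer key. Distilled, and assuming (as that hunk does) that the error value fits in a u16:

    const err_int: u16 = switch (mod.intern_pool.indexToKey(val.ip_index)) {
        .int => |int| @intCast(u16, int.storage.u64),
        else => unreachable,
    };
    try self.addConstInt(u16, err_int);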
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 3e4e90951ee6..c971b5b26fdd 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -421,8 +421,7 @@ pub const DeclState = struct {
                     const value = vals.keys()[field_i];
                     // TODO do not assume a 64bit enum value - could be bigger.
                     // See https://github.com/ziglang/zig/issues/645
-                    var int_buffer: Value.Payload.U64 = undefined;
-                    const field_int_val = value.enumToInt(ty, &int_buffer);
+                    const field_int_val = try value.enumToInt(ty, mod);
                     break :value @bitCast(u64, field_int_val.toSignedInt(mod));
                 } else @intCast(u64, field_i);
                 mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
diff --git a/src/type.zig b/src/type.zig
index 592eb9a21e76..5b18245323b5 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2077,10 +2077,10 @@ pub const Type = struct {
     }

     /// May capture a reference to `ty`.
-    pub fn lazyAbiAlignment(ty: Type, mod: *const Module, arena: Allocator) !Value {
+    pub fn lazyAbiAlignment(ty: Type, mod: *Module, arena: Allocator) !Value {
         switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) {
             .val => |val| return val,
-            .scalar => |x| return Value.Tag.int_u64.create(arena, x),
+            .scalar => |x| return mod.intValue(ty, x),
         }
     }
@@ -2468,10 +2468,10 @@ pub const Type = struct {
     }

     /// May capture a reference to `ty`.
-    pub fn lazyAbiSize(ty: Type, mod: *const Module, arena: Allocator) !Value {
+    pub fn lazyAbiSize(ty: Type, mod: *Module, arena: Allocator) !Value {
         switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) {
             .val => |val| return val,
-            .scalar => |x| return Value.Tag.int_u64.create(arena, x),
+            .scalar => |x| return mod.intValue(ty, x),
         }
     }
@@ -4310,8 +4310,8 @@ pub const Type = struct {
     }

     // Works for vectors and vectors of integers.
-    pub fn minInt(ty: Type, arena: Allocator, mod: *const Module) !Value {
-        const scalar = try minIntScalar(ty.scalarType(mod), arena, mod);
+    pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value {
+        const scalar = try minIntScalar(ty.scalarType(mod), mod);
         if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
             return Value.Tag.repeated.create(arena, scalar);
         } else {
@@ -4319,38 +4319,28 @@ pub const Type = struct {
         }
     }

-    /// Asserts that self.zigTypeTag(mod) == .Int.
-    pub fn minIntScalar(ty: Type, arena: Allocator, mod: *const Module) !Value {
-        assert(ty.zigTypeTag(mod) == .Int);
+    /// Asserts that the type is an integer.
+    pub fn minIntScalar(ty: Type, mod: *Module) !Value {
         const info = ty.intInfo(mod);
-
-        if (info.bits == 0) {
-            return Value.initTag(.the_only_possible_value);
-        }
-
-        if (info.signedness == .unsigned) {
-            return Value.zero;
-        }
+        if (info.signedness == .unsigned) return Value.zero;
+        if (info.bits == 0) return Value.negative_one;

         if (std.math.cast(u6, info.bits - 1)) |shift| {
             const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
-            return Value.Tag.int_i64.create(arena, n);
+            return mod.intValue(Type.comptime_int, n);
         }

-        var res = try std.math.big.int.Managed.init(arena);
+        var res = try std.math.big.int.Managed.init(mod.gpa);
+        defer res.deinit();
+
         try res.setTwosCompIntLimit(.min, info.signedness, info.bits);
-        const res_const = res.toConst();
-        if (res_const.positive) {
-            return Value.Tag.int_big_positive.create(arena, res_const.limbs);
-        } else {
-            return Value.Tag.int_big_negative.create(arena, res_const.limbs);
-        }
+        return mod.intValue_big(Type.comptime_int, res.toConst());
     }
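An aside on the minIntScalar hunk above: the u6 shift trick computes the limit of any integer type up to 64 bits without constructing a big-int, by arithmetically shifting the 64-bit limit down to the requested width. A worked instance for i8 (shift == 7):

    // minInt(i64) is -0x8000_0000_0000_0000; an arithmetic shift right by
    // 63 - 7 == 56 bits replicates the sign bit, yielding -0x80 == -128,
    // which is exactly std.math.minInt(i8).
    const n = @as(i64, std.math.minInt(i64)) >> (63 - 7);
    std.debug.assert(n == -128);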

     // Works for vectors and vectors of integers.
-    pub fn maxInt(ty: Type, arena: Allocator, mod: *const Module) !Value {
-        const scalar = try maxIntScalar(ty.scalarType(mod), arena, mod);
+    pub fn maxInt(ty: Type, arena: Allocator, mod: *Module) !Value {
+        const scalar = try maxIntScalar(ty.scalarType(mod), mod);
         if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
             return Value.Tag.repeated.create(arena, scalar);
         } else {
@@ -4358,41 +4348,39 @@ pub const Type = struct {
         }
     }

-    /// Asserts that self.zigTypeTag() == .Int.
-    pub fn maxIntScalar(self: Type, arena: Allocator, mod: *const Module) !Value {
-        assert(self.zigTypeTag(mod) == .Int);
+    /// Asserts that the type is an integer.
+    pub fn maxIntScalar(self: Type, mod: *Module) !Value {
         const info = self.intInfo(mod);

-        if (info.bits == 0) {
-            return Value.initTag(.the_only_possible_value);
-        }
-
-        switch (info.bits - @boolToInt(info.signedness == .signed)) {
-            0 => return Value.zero,
-            1 => return Value.one,
+        switch (info.bits) {
+            0 => return switch (info.signedness) {
+                .signed => Value.negative_one,
+                .unsigned => Value.zero,
+            },
+            1 => return switch (info.signedness) {
+                .signed => Value.zero,
+                .unsigned => Value.one,
+            },
             else => {},
         }

         if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
             .signed => {
                 const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
-                return Value.Tag.int_i64.create(arena, n);
+                return mod.intValue(Type.comptime_int, n);
             },
             .unsigned => {
                 const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
-                return Value.Tag.int_u64.create(arena, n);
+                return mod.intValue(Type.comptime_int, n);
             },
         };

-        var res = try std.math.big.int.Managed.init(arena);
+        var res = try std.math.big.int.Managed.init(mod.gpa);
+        defer res.deinit();
+
         try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
-        const res_const = res.toConst();
-        if (res_const.positive) {
-            return Value.Tag.int_big_positive.create(arena, res_const.limbs);
-        } else {
-            return Value.Tag.int_big_negative.create(arena, res_const.limbs);
-        }
+        return mod.intValue_big(Type.comptime_int, res.toConst());
     }

     /// Asserts the type is an enum or a union.
@@ -4497,12 +4485,11 @@ pub const Type = struct {
         const S = struct {
             fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize {
                 if (int_val.compareAllWithZero(.lt, m)) return null;
-                var end_payload: Value.Payload.U64 = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = end,
+                const end_val = m.intValue(int_ty, end) catch |err| switch (err) {
+                    // TODO: eliminate this failure condition
+                    error.OutOfMemory => @panic("OOM"),
                 };
-                const end_val = Value.initPayload(&end_payload.base);
-                if (int_val.compareAll(.gte, end_val, int_ty, m)) return null;
+                if (int_val.compareScalar(.gte, end_val, int_ty, m)) return null;
                 return @intCast(usize, int_val.toUnsignedInt(m));
             }
         };
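From src/value.zig onward the integer payload tags disappear entirely, so every new integer Value must come from the two Module helpers used throughout the hunks above. Their call-site shapes, as inferred from usage in this patch (managed stands for a hypothetical std.math.big.int.Managed already in scope):

    // Small integers intern directly from a machine integer:
    const small = try mod.intValue(Type.usize, 42);

    // Arbitrary-precision integers intern from a std.math.big.int.Const:
    const big = try mod.intValue_big(Type.comptime_int, managed.toConst());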
diff --git a/src/value.zig b/src/value.zig
index f8188c64ab1d..c0ea9e149f13 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -33,8 +33,6 @@ pub const Value = struct {
     // Keep in sync with tools/stage2_pretty_printers_common.py
     pub const Tag = enum(usize) {
         // The first section of this enum are tags that require no payload.
-        zero,
-        one,
         /// The only possible value for a particular type, which is stored externally.
         the_only_possible_value,
@@ -43,10 +41,6 @@ pub const Value = struct {
         // After this, the tag requires a payload.
         ty,
-        int_u64,
-        int_i64,
-        int_big_positive,
-        int_big_negative,
         function,
         extern_fn,
         /// A comptime-known pointer can point to the address of a global
@@ -129,17 +123,11 @@ pub const Value = struct {
         pub fn Type(comptime t: Tag) type {
             return switch (t) {
-                .zero,
-                .one,
                 .the_only_possible_value,
                 .empty_struct_value,
                 .empty_array,
                 => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"),

-                .int_big_positive,
-                .int_big_negative,
-                => Payload.BigInt,
-
                 .extern_fn => Payload.ExternFn,

                 .decl_ref => Payload.Decl,
@@ -169,8 +157,6 @@ pub const Value = struct {
                 .lazy_size,
                 => Payload.Ty,

-                .int_u64 => Payload.U64,
-                .int_i64 => Payload.I64,
                 .function => Payload.Function,
                 .variable => Payload.Variable,
                 .decl_ref_mut => Payload.DeclRefMut,
@@ -281,8 +267,6 @@ pub const Value = struct {
                 .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
             };
         } else switch (self.legacy.ptr_otherwise.tag) {
-            .zero,
-            .one,
             .the_only_possible_value,
             .empty_array,
             .empty_struct_value,
@@ -300,20 +284,6 @@ pub const Value = struct {
                     .legacy = .{ .ptr_otherwise = &new_payload.base },
                 };
             },
-            .int_u64 => return self.copyPayloadShallow(arena, Payload.U64),
-            .int_i64 => return self.copyPayloadShallow(arena, Payload.I64),
-            .int_big_positive, .int_big_negative => {
-                const old_payload = self.cast(Payload.BigInt).?;
-                const new_payload = try arena.create(Payload.BigInt);
-                new_payload.* = .{
-                    .base = .{ .tag = self.legacy.ptr_otherwise.tag },
-                    .data = try arena.dupe(std.math.big.Limb, old_payload.data),
-                };
-                return Value{
-                    .ip_index = .none,
-                    .legacy = .{ .ptr_otherwise = &new_payload.base },
-                };
-            },
             .function => return self.copyPayloadShallow(arena, Payload.Function),
             .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
             .variable => return self.copyPayloadShallow(arena, Payload.Variable),
@@ -525,8 +495,6 @@ pub const Value = struct {
             .@"union" => {
                 return out_stream.writeAll("(union value)");
             },
-            .zero => return out_stream.writeAll("0"),
-            .one => return out_stream.writeAll("1"),
             .the_only_possible_value => return out_stream.writeAll("(the only possible value)"),
             .ty => return val.castTag(.ty).?.data.dump("", options, out_stream),
             .lazy_align => {
@@ -539,10 +507,6 @@ pub const Value = struct {
                 try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
                 return try out_stream.writeAll(")");
             },
-            .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream),
-            .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
-            .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
-            .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
             .runtime_value => return out_stream.writeAll("[runtime value]"),
             .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}),
             .extern_fn => return out_stream.writeAll("(extern function)"),
@@ -661,9 +625,8 @@ pub const Value = struct {
     fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
         const result = try allocator.alloc(u8, @intCast(usize, len));
-        var elem_value_buf: ElemValueBuffer = undefined;
         for (result, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
+            const elem_val = try val.elemValue(mod, i);
             elem.* = @intCast(u8, elem_val.toUnsignedInt(mod));
         }
         return result;
     }
@@ -695,7 +658,7 @@ pub const Value = struct {
         }
     }

-    pub fn enumToInt(val: Value, ty: Type, buffer: *Payload.U64) Value {
+    pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
         const field_index = switch (val.tag()) {
             .enum_field_index => val.castTag(.enum_field_index).?.data,
             .the_only_possible_value => blk: {
@@ -717,11 +680,7 @@ pub const Value = struct {
                     return enum_full.values.keys()[field_index];
                 } else {
                     // Field index and integer values are the same.
-                    buffer.* = .{
-                        .base = .{ .tag = .int_u64 },
-                        .data = field_index,
-                    };
-                    return Value.initPayload(&buffer.base);
+                    return mod.intValue(enum_full.tag_ty, field_index);
                 }
             },
             .enum_numbered => {
@@ -730,20 +689,13 @@ pub const Value = struct {
                     return enum_obj.values.keys()[field_index];
                 } else {
                     // Field index and integer values are the same.
-                    buffer.* = .{
-                        .base = .{ .tag = .int_u64 },
-                        .data = field_index,
-                    };
-                    return Value.initPayload(&buffer.base);
+                    return mod.intValue(enum_obj.tag_ty, field_index);
                 }
             },
             .enum_simple => {
                 // Field index and integer values are the same.
-                buffer.* = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = field_index,
-                };
-                return Value.initPayload(&buffer.base);
+                const tag_ty = ty.intTagType();
+                return mod.intValue(tag_ty, field_index);
             },
             else => unreachable,
         }
@@ -802,12 +754,9 @@ pub const Value = struct {
             .undef => unreachable,
             .null_value => BigIntMutable.init(&space.limbs, 0).toConst(),
             .none => switch (val.tag()) {
-                .zero,
                 .the_only_possible_value, // i0, u0
                 => BigIntMutable.init(&space.limbs, 0).toConst(),

-                .one => BigIntMutable.init(&space.limbs, 1).toConst(),
-
                 .enum_field_index => {
                     const index = val.castTag(.enum_field_index).?.data;
                     return BigIntMutable.init(&space.limbs, index).toConst();
@@ -816,11 +765,6 @@ pub const Value = struct {
                     const sub_val = val.castTag(.runtime_value).?.data;
                     return sub_val.toBigIntAdvanced(space, mod, opt_sema);
                 },
-                .int_u64 => BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(),
-                .int_i64 => BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(),
-                .int_big_positive => val.castTag(.int_big_positive).?.asBigInt(),
-                .int_big_negative => val.castTag(.int_big_negative).?.asBigInt(),
-
                 .lazy_align => {
                     const ty = val.castTag(.lazy_align).?.data;
                     if (opt_sema) |sema| {
@@ -869,17 +813,9 @@ pub const Value = struct {
             .bool_true => return 1,
             .undef => unreachable,
             .none => switch (val.tag()) {
-                .zero,
                 .the_only_possible_value, // i0, u0
                 => return 0,

-                .one => return 1,
-
-                .int_u64 => return val.castTag(.int_u64).?.data,
-                .int_i64 => return @intCast(u64, val.castTag(.int_i64).?.data),
-                .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null,
-                .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null,
-
                 .lazy_align => {
                     const ty = val.castTag(.lazy_align).?.data;
                     if (opt_sema) |sema| {
@@ -922,17 +858,9 @@ pub const Value = struct {
             .bool_true => return 1,
             .undef => unreachable,
             .none => switch (val.tag()) {
-                .zero,
                 .the_only_possible_value, // i0, u0
                 => return 0,

-                .one => return 1,
-
-                .int_u64 => return @intCast(i64, val.castTag(.int_u64).?.data),
-                .int_i64 => return val.castTag(.int_i64).?.data,
-                .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable,
-                .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable,
-
                 .lazy_align => {
                     const ty = val.castTag(.lazy_align).?.data;
                     return @intCast(i64, ty.abiAlignment(mod));
@@ -959,22 +887,7 @@ pub const Value = struct {
         return switch (val.ip_index) {
             .bool_true => true,
             .bool_false => false,
-            .none => switch (val.tag()) {
-                .one => true,
-                .zero => false,
-
-                .int_u64 => switch (val.castTag(.int_u64).?.data) {
-                    0 => false,
-                    1 => true,
-                    else => unreachable,
-                },
-                .int_i64 => switch (val.castTag(.int_i64).?.data) {
-                    0 => false,
-                    1 => true,
-                    else => unreachable,
-                },
-                else => unreachable,
-            },
+            .none => unreachable,
             else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
                 .int => |int| switch (int.storage) {
                     .big_int => |big_int| !big_int.eqZero(),
@@ -1004,6 +917,7 @@ pub const Value = struct {
         ReinterpretDeclRef,
         IllDefinedMemoryLayout,
         Unimplemented,
+        OutOfMemory,
     }!void {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
@@ -1022,16 +936,14 @@ pub const Value = struct {
                 const bits = int_info.bits;
                 const byte_count = (bits + 7) / 8;

-                var enum_buffer: Payload.U64 = undefined;
-                const int_val = val.enumToInt(ty, &enum_buffer);
+                const int_val = try val.enumToInt(ty, mod);

                 if (byte_count <= @sizeOf(u64)) {
-                    const int: u64 = switch (int_val.tag()) {
-                        .zero => 0,
-                        .one => 1,
-                        .int_u64 => int_val.castTag(.int_u64).?.data,
-                        .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
-                        else => unreachable,
+                    const ip_key = mod.intern_pool.indexToKey(int_val.ip_index);
+                    const int: u64 = switch (ip_key.int.storage) {
+                        .u64 => |x| x,
+                        .i64 => |x| @bitCast(u64, x),
+                        .big_int => unreachable,
                     };
                     for (buffer[0..byte_count], 0..) |_, i| switch (endian) {
                         .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
@@ -1044,11 +956,11 @@ pub const Value = struct {
                 }
             },
             .Float => switch (ty.floatBits(target)) {
-                16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16)), endian),
-                32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32)), endian),
-                64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64)), endian),
-                80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80)), endian),
-                128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128)), endian),
+                16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian),
+                32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32, mod)), endian),
+                64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64, mod)), endian),
+                80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80, mod)), endian),
+                128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128, mod)), endian),
                 else => unreachable,
             },
             .Array => {
                 const len = ty.arrayLen(mod);
                 const elem_ty = ty.childType(mod);
                 const elem_size = @intCast(usize, elem_ty.abiSize(mod));
                 var elem_i: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
                 var buf_off: usize = 0;
                 while (elem_i < len) : (elem_i += 1) {
-                    const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
+                    const elem_val = try val.elemValue(mod, elem_i);
                     try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
                     buf_off += elem_size;
                 }
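Because enumToInt can now allocate, OutOfMemory joins writeToMemory's error set above, and any caller that switches exhaustively on the error needs a new prong. A minimal caller-side sketch:

    val.writeToMemory(ty, mod, buffer) catch |err| switch (err) {
        // New prong: interning inside enumToInt can fail with OOM.
        error.OutOfMemory => return error.OutOfMemory,
        // Pre-existing failure modes keep whatever handling they had.
        else => |e| return e,
    };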
@@ -1122,7 +1033,13 @@ pub const Value = struct {
     ///
     /// Both the start and the end of the provided buffer must be tight, since
     /// big-endian packed memory layouts start at the end of the buffer.
-    pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) error{ReinterpretDeclRef}!void {
+    pub fn writeToPackedMemory(
+        val: Value,
+        ty: Type,
+        mod: *Module,
+        buffer: []u8,
+        bit_offset: usize,
+    ) error{ ReinterpretDeclRef, OutOfMemory }!void {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         if (val.isUndef()) {
@@ -1147,16 +1064,14 @@ pub const Value = struct {
                 const bits = ty.intInfo(mod).bits;
                 const abi_size = @intCast(usize, ty.abiSize(mod));

-                var enum_buffer: Payload.U64 = undefined;
-                const int_val = val.enumToInt(ty, &enum_buffer);
+                const int_val = try val.enumToInt(ty, mod);

                 if (abi_size == 0) return;
                 if (abi_size <= @sizeOf(u64)) {
-                    const int: u64 = switch (int_val.tag()) {
-                        .zero => 0,
-                        .one => 1,
-                        .int_u64 => int_val.castTag(.int_u64).?.data,
-                        .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
+                    const ip_key = mod.intern_pool.indexToKey(int_val.ip_index);
+                    const int: u64 = switch (ip_key.int.storage) {
+                        .u64 => |x| x,
+                        .i64 => |x| @bitCast(u64, x),
                         else => unreachable,
                     };
                     std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian);
@@ -1167,11 +1082,11 @@ pub const Value = struct {
                 }
             },
             .Float => switch (ty.floatBits(target)) {
-                16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16)), endian),
-                32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32)), endian),
-                64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64)), endian),
-                80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80)), endian),
-                128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128)), endian),
+                16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16, mod)), endian),
+                32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32, mod)), endian),
+                64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64, mod)), endian),
+                80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80, mod)), endian),
+                128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128, mod)), endian),
                 else => unreachable,
             },
             .Vector => {
@@ -1181,11 +1096,10 @@ pub const Value = struct {

                 var bits: u16 = 0;
                 var elem_i: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
                 while (elem_i < len) : (elem_i += 1) {
                     // On big-endian systems, LLVM reverses the element order of vectors by default
                     const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i;
-                    const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf);
+                    const elem_val = try val.elemValue(mod, tgt_elem_i);
                     try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
                     bits += elem_bit_size;
                 }
@@ -1264,11 +1178,13 @@ pub const Value = struct {
                 if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                     .signed => {
                         const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
-                        return Value.Tag.int_i64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+                        const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+                        return mod.intValue(ty, result);
                     },
                     .unsigned => {
                         const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
-                        return Value.Tag.int_u64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+                        const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+                        return mod.intValue(ty, result);
                     },
                 } else { // Slow path, we have to construct a big-int
                     const Limb = std.math.big.Limb;
@@ -1277,7 +1193,7 @@ pub const Value = struct {
                     var bigint = BigIntMutable.init(limbs_buffer, 0);
                     bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness);
-                    return fromBigInt(arena, bigint.toConst());
+                    return mod.intValue_big(ty, bigint.toConst());
                 }
             },
             .Float => switch (ty.floatBits(target)) {
@@ -1381,8 +1297,8 @@ pub const Value = struct {
                 const bits = int_info.bits;
                 if (bits == 0) return Value.zero;

                 if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
-                    .signed => return Value.Tag.int_i64.create(arena, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
-                    .unsigned => return Value.Tag.int_u64.create(arena, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
+                    .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
+                    .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
                 } else { // Slow path, we have to construct a big-int
                     const Limb = std.math.big.Limb;
                     const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
@@ -1390,7 +1306,7 @@ pub const Value = struct {
                     var bigint = BigIntMutable.init(limbs_buffer, 0);
                     bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
-                    return fromBigInt(arena, bigint.toConst());
+                    return mod.intValue_big(ty, bigint.toConst());
                 }
             },
             .Float => switch (ty.floatBits(target)) {
@@ -1444,32 +1360,29 @@ pub const Value = struct {
     }

     /// Asserts that the value is a float or an integer.
-    pub fn toFloat(val: Value, comptime T: type) T {
-        return switch (val.tag()) {
-            .float_16 => @floatCast(T, val.castTag(.float_16).?.data),
-            .float_32 => @floatCast(T, val.castTag(.float_32).?.data),
-            .float_64 => @floatCast(T, val.castTag(.float_64).?.data),
-            .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
-            .float_128 => @floatCast(T, val.castTag(.float_128).?.data),
-
-            .zero => 0,
-            .one => 1,
-            .int_u64 => {
-                if (T == f80) {
-                    @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
-                }
-                return @intToFloat(T, val.castTag(.int_u64).?.data);
+    pub fn toFloat(val: Value, comptime T: type, mod: *const Module) T {
+        return switch (val.ip_index) {
+            .none => switch (val.tag()) {
+                .float_16 => @floatCast(T, val.castTag(.float_16).?.data),
+                .float_32 => @floatCast(T, val.castTag(.float_32).?.data),
+                .float_64 => @floatCast(T, val.castTag(.float_64).?.data),
+                .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
+                .float_128 => @floatCast(T, val.castTag(.float_128).?.data),
+
+                else => unreachable,
             },
-            .int_i64 => {
-                if (T == f80) {
-                    @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
-                }
-                return @intToFloat(T, val.castTag(.int_i64).?.data);
+            else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .int => |int| switch (int.storage) {
+                    .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)),
+                    inline .u64, .i64 => |x| {
+                        if (T == f80) {
+                            @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
+                        }
+                        return @intToFloat(T, x);
+                    },
+                },
+                else => unreachable,
             },
-
-            .int_big_positive => @floatCast(T, bigIntToFloat(val.castTag(.int_big_positive).?.data, true)),
-            .int_big_negative => @floatCast(T, bigIntToFloat(val.castTag(.int_big_negative).?.data, false)),
-            else => unreachable,
         };
     }
@@ -1498,24 +1411,6 @@ pub const Value = struct {
             .bool_false => ty_bits,
             .bool_true => ty_bits - 1,
             .none => switch (val.tag()) {
-                .zero => ty_bits,
-                .one => ty_bits - 1,
-
-                .int_u64 => {
-                    const big = @clz(val.castTag(.int_u64).?.data);
-                    return big + ty_bits - 64;
-                },
-                .int_i64 => {
-                    @panic("TODO implement i64 Value clz");
-                },
-                .int_big_positive => {
-                    const bigint = val.castTag(.int_big_positive).?.asBigInt();
-                    return bigint.clz(ty_bits);
-                },
-                .int_big_negative => {
-                    @panic("TODO implement int_big_negative Value clz");
-                },
-
                 .the_only_possible_value => {
                     assert(ty_bits == 0);
                     return ty_bits;
@@ -1546,24 +1441,6 @@ pub const Value = struct {
             .bool_false => ty_bits,
             .bool_true => 0,
             .none => switch (val.tag()) {
-                .zero => ty_bits,
-                .one => 0,
-
-                .int_u64 => {
-                    const big = @ctz(val.castTag(.int_u64).?.data);
-                    return if (big == 64) ty_bits else big;
-                },
-                .int_i64 => {
-                    @panic("TODO implement i64 Value ctz");
-                },
-                .int_big_positive => {
-                    const bigint = val.castTag(.int_big_positive).?.asBigInt();
-                    return bigint.ctz();
-                },
-                .int_big_negative => {
-                    @panic("TODO implement int_big_negative Value ctz");
-                },
-
                 .the_only_possible_value => {
                     assert(ty_bits == 0);
                     return ty_bits;
@@ -1596,20 +1473,7 @@ pub const Value = struct {
         switch (val.ip_index) {
             .bool_false => return 0,
             .bool_true => return 1,
-            .none => switch (val.tag()) {
-                .zero => return 0,
-                .one => return 1,
-
-                .int_u64 => return @popCount(val.castTag(.int_u64).?.data),
-
-                else => {
-                    const info = ty.intInfo(mod);
-
-                    var buffer: Value.BigIntSpace = undefined;
-                    const int = val.toBigInt(&buffer, mod);
-                    return @intCast(u64, int.popCount(info.bits));
-                },
-            },
+            .none => unreachable,
             else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
                 .int => |int| {
                     const info = ty.intInfo(mod);
@@ -1622,7 +1486,7 @@ pub const Value = struct {
         }
     }

-    pub fn bitReverse(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
+    pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
         assert(!val.isUndef());

         const info = ty.intInfo(mod);
@@ -1637,10 +1501,10 @@ pub const Value = struct {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitReverse(operand_bigint, info.signedness, info.bits);

-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

-    pub fn byteSwap(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
+    pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
         assert(!val.isUndef());

         const info = ty.intInfo(mod);
@@ -1658,7 +1522,7 @@ pub const Value = struct {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8);

-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// Asserts the value is an integer and not undefined.
@@ -1669,19 +1533,7 @@ pub const Value = struct {
             .bool_false => 0,
             .bool_true => 1,
             .none => switch (self.tag()) {
-                .zero,
-                .the_only_possible_value,
-                => 0,
-
-                .one => 1,
-
-                .int_u64 => {
-                    const x = self.castTag(.int_u64).?.data;
-                    if (x == 0) return 0;
-                    return @intCast(usize, std.math.log2(x) + 1);
-                },
-                .int_big_positive => self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(),
-                .int_big_negative => self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
+                .the_only_possible_value => 0,

                 .decl_ref_mut,
                 .comptime_field_ptr,
@@ -1715,13 +1567,14 @@ pub const Value = struct {

     /// Converts an integer or a float to a float. May result in a loss of information.
     /// Caller can find out by equality checking the result against the operand.
-    pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
+    pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, mod: *const Module) !Value {
+        const target = mod.getTarget();
         switch (dest_ty.floatBits(target)) {
-            16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
-            32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
-            64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
-            80 => return Value.Tag.float_80.create(arena, self.toFloat(f80)),
-            128 => return Value.Tag.float_128.create(arena, self.toFloat(f128)),
+            16 => return Value.Tag.float_16.create(arena, self.toFloat(f16, mod)),
+            32 => return Value.Tag.float_32.create(arena, self.toFloat(f32, mod)),
+            64 => return Value.Tag.float_64.create(arena, self.toFloat(f64, mod)),
+            80 => return Value.Tag.float_80.create(arena, self.toFloat(f80, mod)),
+            128 => return Value.Tag.float_128.create(arena, self.toFloat(f128, mod)),
             else => unreachable,
         }
     }
@@ -1729,10 +1582,6 @@ pub const Value = struct {
     /// Asserts the value is a float
     pub fn floatHasFraction(self: Value) bool {
         return switch (self.tag()) {
-            .zero,
-            .one,
-            => false,
-
             .float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0,
             .float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0,
             .float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0,
@@ -1757,11 +1606,8 @@ pub const Value = struct {
             .bool_false => return .eq,
             .bool_true => return .gt,
             .none => return switch (lhs.tag()) {
-                .zero,
-                .the_only_possible_value,
-                => .eq,
+                .the_only_possible_value => .eq,

-                .one,
                 .decl_ref,
                 .decl_ref_mut,
                 .comptime_field_ptr,
@@ -1777,10 +1623,6 @@ pub const Value = struct {
                     const val = lhs.castTag(.runtime_value).?.data;
                     return val.orderAgainstZeroAdvanced(mod, opt_sema);
                 },
-                .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
-                .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0),
-                .int_big_positive => lhs.castTag(.int_big_positive).?.asBigInt().orderAgainstScalar(0),
-                .int_big_negative => lhs.castTag(.int_big_negative).?.asBigInt().orderAgainstScalar(0),

                 .lazy_align => {
                     const ty = lhs.castTag(.lazy_align).?.data;
@@ -1878,8 +1720,8 @@ pub const Value = struct {
             }
         }
         if (lhs_float or rhs_float) {
-            const lhs_f128 = lhs.toFloat(f128);
-            const rhs_f128 = rhs.toFloat(f128);
+            const lhs_f128 = lhs.toFloat(f128, mod);
+            const rhs_f128 = rhs.toFloat(f128, mod);
             return std.math.order(lhs_f128, rhs_f128);
         }
@@ -1929,15 +1771,13 @@ pub const Value = struct {

     /// Asserts the values are comparable. Both operands have type `ty`.
     /// For vectors, returns true if comparison is true for ALL elements.
-    pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
+    pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) !bool {
         if (ty.zigTypeTag(mod) == .Vector) {
-            var i: usize = 0;
-            while (i < ty.vectorLen(mod)) : (i += 1) {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod), mod)) {
+            const scalar_ty = ty.scalarType(mod);
+            for (0..ty.vectorLen(mod)) |i| {
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, mod)) {
                     return false;
                 }
             }
@@ -2203,10 +2043,8 @@ pub const Value = struct {
                 return a_type.eql(b_type, mod);
             },
             .Enum => {
-                var buf_a: Payload.U64 = undefined;
-                var buf_b: Payload.U64 = undefined;
-                const a_val = a.enumToInt(ty, &buf_a);
-                const b_val = b.enumToInt(ty, &buf_b);
+                const a_val = try a.enumToInt(ty, mod);
+                const b_val = try b.enumToInt(ty, mod);
                 const int_ty = ty.intTagType();
                 return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema);
             },
@@ -2214,11 +2052,9 @@ pub const Value = struct {
                 const len = ty.arrayLen(mod);
                 const elem_ty = ty.childType(mod);
                 var i: usize = 0;
-                var a_buf: ElemValueBuffer = undefined;
-                var b_buf: ElemValueBuffer = undefined;
                 while (i < len) : (i += 1) {
-                    const a_elem = elemValueBuffer(a, mod, i, &a_buf);
-                    const b_elem = elemValueBuffer(b, mod, i, &b_buf);
+                    const a_elem = try elemValue(a, mod, i);
+                    const b_elem = try elemValue(b, mod, i);
                     if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, opt_sema))) {
                         return false;
                     }
@@ -2282,17 +2118,17 @@ pub const Value = struct {
             },
             .Float => {
                 switch (ty.floatBits(target)) {
-                    16 => return @bitCast(u16, a.toFloat(f16)) == @bitCast(u16, b.toFloat(f16)),
-                    32 => return @bitCast(u32, a.toFloat(f32)) == @bitCast(u32, b.toFloat(f32)),
-                    64 => return @bitCast(u64, a.toFloat(f64)) == @bitCast(u64, b.toFloat(f64)),
-                    80 => return @bitCast(u80, a.toFloat(f80)) == @bitCast(u80, b.toFloat(f80)),
-                    128 => return @bitCast(u128, a.toFloat(f128)) == @bitCast(u128, b.toFloat(f128)),
+                    16 => return @bitCast(u16, a.toFloat(f16, mod)) == @bitCast(u16, b.toFloat(f16, mod)),
+                    32 => return @bitCast(u32, a.toFloat(f32, mod)) == @bitCast(u32, b.toFloat(f32, mod)),
+                    64 => return @bitCast(u64, a.toFloat(f64, mod)) == @bitCast(u64, b.toFloat(f64, mod)),
+                    80 => return @bitCast(u80, a.toFloat(f80, mod)) == @bitCast(u80, b.toFloat(f80, mod)),
+                    128 => return @bitCast(u128, a.toFloat(f128, mod)) == @bitCast(u128, b.toFloat(f128, mod)),
                     else => unreachable,
                 }
             },
             .ComptimeFloat => {
-                const a_float = a.toFloat(f128);
-                const b_float = b.toFloat(f128);
+                const a_float = a.toFloat(f128, mod);
+                const b_float = b.toFloat(f128, mod);

                 const a_nan = std.math.isNan(a_float);
                 const b_nan = std.math.isNan(b_float);
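compareAll turning fallible above is the same knock-on effect: the vector path now reaches elements through the allocating elemValue. Call sites change shape accordingly (sketch):

    // Before: if (lhs.compareAll(.gte, rhs, ty, mod)) { ... }
    //
    // After: the vector path may intern element values, so the result
    // is an error union and needs a try.
    if (try lhs.compareAll(.gte, rhs, ty, mod)) {
        // all elements satisfy the comparison
    }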
@@ -2354,16 +2190,16 @@ pub const Value = struct {
             .Float => {
                 // For hash/eql purposes, we treat floats as their IEEE integer representation.
                 switch (ty.floatBits(mod.getTarget())) {
-                    16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16))),
-                    32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32))),
-                    64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64))),
-                    80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80))),
-                    128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
+                    16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16, mod))),
+                    32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32, mod))),
+                    64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64, mod))),
+                    80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80, mod))),
+                    128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))),
                     else => unreachable,
                 }
             },
             .ComptimeFloat => {
-                const float = val.toFloat(f128);
+                const float = val.toFloat(f128, mod);
                 const is_nan = std.math.isNan(float);
                 std.hash.autoHash(hasher, is_nan);
                 if (!is_nan) {
@@ -2387,9 +2223,11 @@ pub const Value = struct {
                 const len = ty.arrayLen(mod);
                 const elem_ty = ty.childType(mod);
                 var index: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
                 while (index < len) : (index += 1) {
-                    const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
+                    const elem_val = val.elemValue(mod, index) catch |err| switch (err) {
+                        // Will be solved when arrays and vectors get migrated to the intern pool.
+                        error.OutOfMemory => @panic("OOM"),
+                    };
                     elem_val.hash(elem_ty, hasher, mod);
                 }
             },
@@ -2438,8 +2276,8 @@ pub const Value = struct {
                 hasher.update(val.getError().?);
             },
             .Enum => {
-                var enum_space: Payload.U64 = undefined;
-                const int_val = val.enumToInt(ty, &enum_space);
+                // This panic will go away when enum values move to be stored in the intern pool.
+                const int_val = val.enumToInt(ty, mod) catch @panic("OOM");
                 hashInt(int_val, hasher, mod);
             },
             .Union => {
@@ -2494,7 +2332,7 @@ pub const Value = struct {
             .Type => {
                 val.toType().hashWithHasher(hasher, mod);
             },
-            .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
+            .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))),
             .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
                 .slice => {
                     const slice = val.castTag(.slice).?.data;
@@ -2508,9 +2346,11 @@ pub const Value = struct {
                 const len = ty.arrayLen(mod);
                 const elem_ty = ty.childType(mod);
                 var index: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
                 while (index < len) : (index += 1) {
-                    const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
+                    const elem_val = val.elemValue(mod, index) catch |err| switch (err) {
+                        // Will be solved when arrays and vectors get migrated to the intern pool.
+                        error.OutOfMemory => @panic("OOM"),
+                    };
                     elem_val.hashUncoerced(elem_ty, hasher, mod);
                 }
             },
@@ -2661,12 +2501,6 @@ pub const Value = struct {
                 hashPtr(opt_ptr.container_ptr, hasher, mod);
             },

-            .zero,
-            .one,
-            .int_u64,
-            .int_i64,
-            .int_big_positive,
-            .int_big_negative,
             .the_only_possible_value,
             .lazy_align,
             .lazy_size,
@@ -2720,23 +2554,7 @@ pub const Value = struct {

     /// Asserts the value is a single-item pointer to an array, or an array,
     /// or an unknown-length pointer, and returns the element value at the index.
-    pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value {
-        return elemValueAdvanced(val, mod, index, arena, undefined);
-    }
-
-    pub const ElemValueBuffer = Payload.U64;
-
-    pub fn elemValueBuffer(val: Value, mod: *Module, index: usize, buffer: *ElemValueBuffer) Value {
-        return elemValueAdvanced(val, mod, index, null, buffer) catch unreachable;
-    }
-
-    pub fn elemValueAdvanced(
-        val: Value,
-        mod: *Module,
-        index: usize,
-        arena: ?Allocator,
-        buffer: *ElemValueBuffer,
-    ) error{OutOfMemory}!Value {
+    pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
         switch (val.ip_index) {
             .undef => return Value.undef,
             .none => switch (val.tag()) {
@@ -2751,43 +2569,27 @@ pub const Value = struct {
                 .bytes => {
                     const byte = val.castTag(.bytes).?.data[index];
-                    if (arena) |a| {
-                        return Tag.int_u64.create(a, byte);
-                    } else {
-                        buffer.* = .{
-                            .base = .{ .tag = .int_u64 },
-                            .data = byte,
-                        };
-                        return initPayload(&buffer.base);
-                    }
+                    return mod.intValue(Type.u8, byte);
                 },
                 .str_lit => {
                     const str_lit = val.castTag(.str_lit).?.data;
                     const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
                     const byte = bytes[index];
-                    if (arena) |a| {
-                        return Tag.int_u64.create(a, byte);
-                    } else {
-                        buffer.* = .{
-                            .base = .{ .tag = .int_u64 },
-                            .data = byte,
-                        };
-                        return initPayload(&buffer.base);
-                    }
+                    return mod.intValue(Type.u8, byte);
                 },

                 // No matter the index; all the elements are the same!
                 .repeated => return val.castTag(.repeated).?.data,

                 .aggregate => return val.castTag(.aggregate).?.data[index],
-                .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer),
+                .slice => return val.castTag(.slice).?.data.ptr.elemValue(mod, index),

-                .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer),
-                .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer),
-                .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer),
+                .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValue(mod, index),
+                .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValue(mod, index),
+                .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValue(mod, index),
                 .elem_ptr => {
                     const data = val.castTag(.elem_ptr).?.data;
-                    return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer);
+                    return data.array_ptr.elemValue(mod, index + data.index);
                 },
                 .field_ptr => {
                     const data = val.castTag(.field_ptr).?.data;
@@ -2795,7 +2597,7 @@ pub const Value = struct {
                     const container_decl = mod.declPtr(decl_index);
                     const field_type = data.container_ty.structFieldType(data.field_index);
                     const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index);
-                    return field_val.elemValueAdvanced(mod, index, arena, buffer);
+                    return field_val.elemValue(mod, index);
                 } else unreachable;
                 },
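The hunk above collapses the elemValue / elemValueBuffer / elemValueAdvanced triple into one allocating entry point; the caller-owned Payload.U64 scratch buffer disappears because byte elements are interned instead. The call-site migration, in sketch form:

    // Before (infallible, caller-owned scratch storage):
    //     var buf: Value.ElemValueBuffer = undefined;
    //     const elem = mask.elemValueBuffer(mod, i, &buf);
    //
    // After (single fallible entry point):
    const elem = try mask.elemValue(mod, i);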
@@ -2803,11 +2605,11 @@ pub const Value = struct {
                 // to have only one possible value itself.
                 .the_only_possible_value => return val,

-                .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
-                .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
+                .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValue(mod, index),
+                .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValue(mod, index),

-                .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
-                .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
+                .opt_payload => return val.castTag(.opt_payload).?.data.elemValue(mod, index),
+                .eu_payload => return val.castTag(.eu_payload).?.data.elemValue(mod, index),

                 else => unreachable,
             },
@@ -3004,7 +2806,7 @@ pub const Value = struct {
     /// TODO: check for cases such as array that is not marked undef but all the element
     /// values are marked undef, or struct that is not marked undef but all fields are marked
     /// undef, etc.
-    pub fn anyUndef(self: Value, mod: *Module) bool {
+    pub fn anyUndef(self: Value, mod: *Module) !bool {
         switch (self.ip_index) {
             .undef => return true,
             .none => switch (self.tag()) {
@@ -3012,18 +2814,16 @@ pub const Value = struct {
                 .slice => {
                     const payload = self.castTag(.slice).?;
                     const len = payload.data.len.toUnsignedInt(mod);

-                    var elem_value_buf: ElemValueBuffer = undefined;
-                    var i: usize = 0;
-                    while (i < len) : (i += 1) {
-                        const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf);
-                        if (elem_val.anyUndef(mod)) return true;
+                    for (0..len) |i| {
+                        const elem_val = try payload.data.ptr.elemValue(mod, i);
+                        if (try elem_val.anyUndef(mod)) return true;
                     }
                 },

                 .aggregate => {
                     const payload = self.castTag(.aggregate).?;
                     for (payload.data) |val| {
-                        if (val.anyUndef(mod)) return true;
+                        if (try val.anyUndef(mod)) return true;
                     }
                 },
                 else => {},
@@ -3036,35 +2836,37 @@ pub const Value = struct {

     /// Asserts the value is not undefined and not unreachable.
     /// Integer value 0 is considered null because of C pointers.
-    pub fn isNull(self: Value, mod: *const Module) bool {
-        return switch (self.ip_index) {
+    pub fn isNull(val: Value, mod: *const Module) bool {
+        return switch (val.ip_index) {
             .undef => unreachable,
             .unreachable_value => unreachable,
-            .null_value => true,
-            .none => switch (self.tag()) {
+
+            .null_value,
+            .zero,
+            .zero_usize,
+            .zero_u8,
+            => true,
+
+            .none => switch (val.tag()) {
                 .opt_payload => false,

                 // If it's not one of those two tags then it must be a C pointer value,
                 // in which case the value 0 is null and other values are non-null.
- .zero, - .the_only_possible_value, - => true, - - .one => false, - - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, - => self.orderAgainstZero(mod).compare(.eq), + .the_only_possible_value => true, .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, else => false, }, - else => false, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.eqZero(), + inline .u64, .i64 => |x| x == 0, + }, + else => unreachable, + }, }; } @@ -3078,17 +2880,13 @@ pub const Value = struct { .unreachable_value => unreachable, .none => switch (self.tag()) { .@"error" => self.castTag(.@"error").?.data.name, - .int_u64 => @panic("TODO"), - .int_i64 => @panic("TODO"), - .int_big_positive => @panic("TODO"), - .int_big_negative => @panic("TODO"), - .one => @panic("TODO"), + .eu_payload => null, + .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, - - else => null, + else => unreachable, }, - else => null, + else => unreachable, }; } @@ -3147,10 +2945,10 @@ pub const Value = struct { pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, int_ty.vectorLen(mod)); + const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(mod), mod, opt_sema); + const elem_val = try val.elemValue(mod, i); + scalar.* = try intToFloatScalar(elem_val, arena, scalar_ty, mod, opt_sema); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3162,24 +2960,7 @@ pub const Value = struct { switch (val.ip_index) { .undef => return val, .none => switch (val.tag()) { - .zero, .one => return val, - .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 - .int_u64 => { - return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target); - }, - .int_i64 => { - return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target); - }, - .int_big_positive => { - const limbs = val.castTag(.int_big_positive).?.data; - const float = bigIntToFloat(limbs, true); - return floatToValue(float, arena, float_ty, target); - }, - .int_big_negative => { - const limbs = val.castTag(.int_big_negative).?.data; - const float = bigIntToFloat(limbs, false); - return floatToValue(float, arena, float_ty, target); - }, + .the_only_possible_value => return Value.zero, // for i0, u0 .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -3198,7 +2979,16 @@ pub const Value = struct { }, else => unreachable, }, - else => unreachable, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .big_int => |big_int| { + const float = bigIntToFloat(big_int.limbs, big_int.positive); + return floatToValue(float, arena, float_ty, target); + }, + inline .u64, .i64 => |x| intToFloatInner(x, arena, float_ty, target), + }, + else => unreachable, + }, } } @@ -3238,22 +3028,6 @@ pub const Value = struct { wrapped_result: Value, }; - pub fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value { - if (big_int.positive) { - if (big_int.to(u64)) |x| { - return Value.Tag.int_u64.create(arena, x); - } else |_| { - return Value.Tag.int_big_positive.create(arena, big_int.limbs); - 
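
With the small integer constants interned, `isNull` no longer needs the per-tag integer cases; an interned integer is classified through its pool key instead. The lookup, isolated (this is the shape of the new `else` branch; it assumes `val.ip_index` refers to an interned integer, i.e. a C pointer value):

    switch (mod.intern_pool.indexToKey(val.ip_index)) {
        .int => |int| switch (int.storage) {
            .big_int => |big_int| return big_int.eqZero(),
            inline .u64, .i64 => |x| return x == 0,
        },
        else => unreachable,
    }
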
} - } else { - if (big_int.to(i64)) |x| { - return Value.Tag.int_i64.create(arena, x); - } else |_| { - return Value.Tag.int_big_negative.create(arena, big_int.limbs); - } - } - } - /// Supports (vectors of) integers only; asserts neither operand is undefined. pub fn intAddSat( lhs: Value, @@ -3264,12 +3038,11 @@ pub const Value = struct { ) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3299,7 +3072,7 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// Supports (vectors of) integers only; asserts neither operand is undefined. @@ -3312,12 +3085,11 @@ pub const Value = struct { ) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3347,7 +3119,7 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn intMulWithOverflow( @@ -3360,12 +3132,11 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod)); const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
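
Deleting `fromBigInt` is what forces the new `ty: Type` parameter through all of the scalar integer helpers in this file: an interned integer is stored together with its type, so every construction site has to know the result type. Migration sketch:

    // before: result boxed as an untyped, arena-allocated big-int payload
    return fromBigInt(arena, result_bigint.toConst());

    // after: result interned together with its type through the Module
    return mod.intValue_big(ty, result_bigint.toConst());
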
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -3408,7 +3179,7 @@ pub const Value = struct { return OverflowArithmeticResult{ .overflow_bit = boolToInt(overflowed), - .wrapped_result = try fromBigInt(arena, result_bigint.toConst()), + .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), }; } @@ -3423,10 +3194,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -3467,10 +3236,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -3510,7 +3277,7 @@ pub const Value = struct { ); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -3542,8 +3309,7 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); + const elem_val = try val.elemValue(mod, i); scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -3572,7 +3338,7 @@ pub const Value = struct { var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// operands must be (vectors of) integers; handles undefined scalars. 
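
The vector overflow paths build two parallel aggregates, one of overflow bits and one of wrapped results. The scalar contract is unchanged apart from where the wrapped value comes from (sketch; the names are the surrounding code's):

    const of = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
    overflowed_data[i] = of.overflow_bit;  // boolToInt(overflowed)
    scalar.* = of.wrapped_result;          // now interned via mod.intValue_big
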
@@ -3580,19 +3346,17 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseAndScalar(lhs, rhs, allocator, mod); + return bitwiseAndScalar(lhs, rhs, ty, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { + pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without @@ -3608,7 +3372,7 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitAnd(lhs_bigint, rhs_bigint); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// operands must be (vectors of) integers; handles undefined scalars. @@ -3616,10 +3380,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -3632,12 +3394,7 @@ pub const Value = struct { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - - const all_ones = if (ty.isSignedInt(mod)) - try Value.Tag.int_i64.create(arena, -1) - else - try ty.maxInt(arena, mod); - + const all_ones = if (ty.isSignedInt(mod)) Value.negative_one else try ty.maxIntScalar(mod); return bitwiseXor(anded, all_ones, ty, arena, mod); } @@ -3646,19 +3403,17 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseOrScalar(lhs, rhs, allocator, mod); + return bitwiseOrScalar(lhs, rhs, ty, allocator, mod); } /// operands must be integers; handles undefined. 
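
`bitwiseNandScalar` still computes ~(a & b) as (a & b) ^ all_ones; only the all-ones mask changes, coming from interned constants (`Value.negative_one` for signed types, `ty.maxIntScalar(mod)` for unsigned) instead of an arena allocation. Worked example for u8 (sketch):

    // a        = 0b1100_1010
    // b        = 0b1010_0110
    // a & b    = 0b1000_0010
    // all_ones = 0b1111_1111  (maxInt(u8))
    // ~(a & b) = 0b0111_1101  ((a & b) ^ all_ones)
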
- pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { + pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without @@ -3673,27 +3428,26 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitOr(lhs_bigint, rhs_bigint); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseXorScalar(lhs, rhs, allocator, mod); + return bitwiseXorScalar(lhs, rhs, ty, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { + pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without @@ -3709,25 +3463,24 @@ pub const Value = struct { ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivScalar(lhs, rhs, allocator, mod); + return intDivScalar(lhs, rhs, ty, allocator, mod); } - pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; @@ -3749,25 +3502,24 @@ pub const Value = struct { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_q.toConst()); + return mod.intValue_big(ty, result_q.toConst()); } pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivFloorScalar(lhs, rhs, allocator, mod); + return intDivFloorScalar(lhs, rhs, ty, allocator, mod); } - pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -3789,25 +3541,24 @@ pub const Value = struct { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_q.toConst()); + return mod.intValue_big(ty, result_q.toConst()); } pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intModScalar(lhs, rhs, allocator, mod); + return intModScalar(lhs, rhs, ty, allocator, mod); } - pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
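
`intModScalar` obtains the modulus as the remainder of a floored division: for integers, `@mod(a, b) == a - b * @divFloor(a, b)`, so the remainder produced by `divFloor` is exactly the result and the quotient is discarded. Tiny example (sketch):

    // @divFloor(-7, 3) == -3, so @mod(-7, 3) == -7 - 3 * (-3) == 2
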
var lhs_space: Value.BigIntSpace = undefined; @@ -3829,7 +3580,7 @@ pub const Value = struct { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return fromBigInt(allocator, result_r.toConst()); + return mod.intValue_big(ty, result_r.toConst()); } /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. @@ -3877,46 +3628,44 @@ pub const Value = struct { } pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatRemScalar(lhs, rhs, float_type, arena, target); + return floatRemScalar(lhs, rhs, float_type, arena, mod); } - pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { + pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *const Module) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @rem(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @rem(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @rem(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val)); }, else => unreachable, @@ -3924,46 +3673,44 @@ pub const Value = struct { } pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatModScalar(lhs, rhs, float_type, arena, target); + return floatModScalar(lhs, rhs, float_type, arena, mod); } - pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value { + pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *const Module) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @mod(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @mod(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @mod(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val)); }, else => unreachable, @@ -3973,19 +3720,18 @@ pub const Value = struct { pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intMulScalar(lhs, rhs, allocator, mod); + return intMulScalar(lhs, rhs, ty, allocator, mod); } - pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
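
The float helpers swap their trailing `target: Target` parameter for `mod: *const Module` because `toFloat` can no longer read a value without access to the InternPool; the target is recovered inside the scalar function via `mod.getTarget()`. Call-site sketch:

    // before: const x = v.toFloat(f64);       // payload lookup only
    // after:  const x = v.toFloat(f64, mod);  // may read an interned scalar
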
var lhs_space: Value.BigIntSpace = undefined; @@ -4003,20 +3749,20 @@ pub const Value = struct { ); defer allocator.free(limbs_buffer); result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, mod); + const elem_val = try val.elemValue(mod, i); + scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, bits, mod); + return intTruncScalar(val, ty, allocator, signedness, bits, mod); } /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. @@ -4030,19 +3776,25 @@ pub const Value = struct { ) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - var bits_buf: Value.ElemValueBuffer = undefined; - const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); + const elem_val = try val.elemValue(mod, i); + const bits_elem = try bits.elemValue(mod, i); + scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); + return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); } - pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { + pub fn intTruncScalar( + val: Value, + ty: Type, + allocator: Allocator, + signedness: std.builtin.Signedness, + bits: u16, + mod: *Module, + ) !Value { if (bits == 0) return Value.zero; var val_space: Value.BigIntSpace = undefined; @@ -4055,25 +3807,24 @@ pub const Value = struct { var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.truncate(val_bigint, signedness, bits); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
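
`intTruncScalar` keeps its shape but re-interns the truncated result at the destination type, and truncation to zero bits short-circuits to the canonical zero (the only possible value of u0/i0). Worked example (sketch):

    // truncating 0x1_0000_0001 to unsigned 32 bits:
    //   BigIntMutable.truncate keeps the low 32 bits -> 1
    //   mod.intValue_big(ty, ...) interns 1 at the destination type
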
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shlScalar(lhs, rhs, allocator, mod); + return shlScalar(lhs, rhs, ty, allocator, mod); } - pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -4089,7 +3840,7 @@ pub const Value = struct { .len = undefined, }; result_bigint.shiftLeft(lhs_bigint, shift); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shlWithOverflow( @@ -4103,10 +3854,8 @@ pub const Value = struct { const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod)); const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; @@ -4146,7 +3895,7 @@ pub const Value = struct { } return OverflowArithmeticResult{ .overflow_bit = boolToInt(overflowed), - .wrapped_result = try fromBigInt(allocator, result_bigint.toConst()), + .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), }; } @@ -4160,10 +3909,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -4195,7 +3942,7 @@ pub const Value = struct { .len = undefined, }; result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits); - return fromBigInt(arena, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn shlTrunc( @@ -4208,10 +3955,8 @@ pub const Value = struct { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
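
`shlWithOverflowScalar` reports overflow exactly when the shifted big-int no longer fits in `info.bits`, and hands back the truncated value alongside a u1 bit. Example for u8 (sketch):

    // 0b1100_0000 << 1 needs 9 bits, so:
    //   overflow_bit   = 1
    //   wrapped_result = 0b1000_0000
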
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); @@ -4235,19 +3980,18 @@ pub const Value = struct { pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, mod); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shrScalar(lhs, rhs, allocator, mod); + return shrScalar(lhs, rhs, ty, allocator, mod); } - pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { + pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; @@ -4275,7 +4019,7 @@ pub const Value = struct { .len = undefined, }; result_bigint.shiftRight(lhs_bigint, shift); - return fromBigInt(allocator, result_bigint.toConst()); + return mod.intValue_big(ty, result_bigint.toConst()); } pub fn floatNeg( @@ -4284,31 +4028,30 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatNegScalar(val, float_type, arena, target); + return floatNegScalar(val, float_type, arena, mod); } pub fn floatNegScalar( val: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16)), - 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32)), - 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64)), - 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80)), - 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128)), + 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16, mod)), + 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32, mod)), + 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64, mod)), + 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80, mod)), + 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128, mod)), else => unreachable, } } @@ -4320,19 +4063,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivScalar(lhs, rhs, float_type, arena, target); + return floatDivScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivScalar( @@ -4340,32 +4080,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, lhs_val / rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, lhs_val / rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, lhs_val / rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, lhs_val / rhs_val); }, 128 => { - 
const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, lhs_val / rhs_val); }, else => unreachable, @@ -4379,19 +4120,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivFloorScalar(lhs, rhs, float_type, arena, target); + return floatDivFloorScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivFloorScalar( @@ -4399,32 +4137,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @divFloor(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val)); }, else => unreachable, @@ -4438,19 +4177,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivTruncScalar(lhs, rhs, float_type, arena, target); + return floatDivTruncScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivTruncScalar( @@ -4458,32 +4194,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @divTrunc(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val)); }, else => unreachable, @@ -4497,19 +4234,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatMulScalar(lhs, rhs, float_type, arena, target); + return floatMulScalar(lhs, rhs, float_type, arena, mod); } pub fn floatMulScalar( @@ -4517,32 +4251,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, lhs_val * rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, lhs_val * rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, lhs_val * rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, lhs_val * rhs_val); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, lhs_val * rhs_val); }, else => unreachable, @@ -4550,39 +4285,38 @@ pub const Value = struct { } pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return sqrtScalar(val, float_type, arena, target); + return sqrtScalar(val, float_type, arena, mod); } - pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @sqrt(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @sqrt(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @sqrt(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @sqrt(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @sqrt(f)); }, else => unreachable, @@ -4590,39 +4324,38 @@ pub const Value = struct { } pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return sinScalar(val, float_type, arena, target); + return sinScalar(val, float_type, arena, mod); } - pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @sin(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @sin(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @sin(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @sin(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @sin(f)); }, else => unreachable, @@ -4630,39 +4363,38 @@ pub const Value = struct { } pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return cosScalar(val, float_type, arena, target); + return cosScalar(val, float_type, arena, mod); } - pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @cos(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @cos(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @cos(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @cos(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @cos(f)); }, else => unreachable, @@ -4670,39 +4402,38 @@ pub const Value = struct { } pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return tanScalar(val, float_type, arena, target); + return tanScalar(val, float_type, arena, mod); } - pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @tan(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @tan(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @tan(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @tan(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @tan(f)); }, else => unreachable, @@ -4710,39 +4441,38 @@ pub const Value = struct { } pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return expScalar(val, float_type, arena, target); + return expScalar(val, float_type, arena, mod); } - pub fn expScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn expScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @exp(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @exp(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @exp(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @exp(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @exp(f)); }, else => unreachable, @@ -4750,39 +4480,38 @@ pub const Value = struct { } pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return exp2Scalar(val, float_type, arena, target); + return exp2Scalar(val, float_type, arena, mod); } - pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @exp2(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @exp2(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @exp2(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @exp2(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @exp2(f)); }, else => unreachable, @@ -4790,39 +4519,38 @@ pub const Value = struct { } pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return logScalar(val, float_type, arena, target); + return logScalar(val, float_type, arena, mod); } - pub fn logScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn logScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @log(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @log(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @log(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @log(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @log(f)); }, else => unreachable, @@ -4830,39 +4558,38 @@ pub const Value = struct { } pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return log2Scalar(val, float_type, arena, target); + return log2Scalar(val, float_type, arena, mod); } - pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @log2(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @log2(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @log2(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @log2(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @log2(f)); }, else => unreachable, @@ -4870,39 +4597,38 @@ pub const Value = struct { } pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return log10Scalar(val, float_type, arena, target); + return log10Scalar(val, float_type, arena, mod); } - pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @log10(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @log10(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @log10(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @log10(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @log10(f)); }, else => unreachable, @@ -4910,39 +4636,38 @@ pub const Value = struct { } pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return fabsScalar(val, float_type, arena, target); + return fabsScalar(val, float_type, arena, mod); } - pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @fabs(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @fabs(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @fabs(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @fabs(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @fabs(f)); }, else => unreachable, @@ -4950,39 +4675,38 @@ pub const Value = struct { } pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floorScalar(val, float_type, arena, target); + return floorScalar(val, float_type, arena, mod); } - pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @floor(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @floor(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @floor(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @floor(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @floor(f)); }, else => unreachable, @@ -4990,39 +4714,38 @@ pub const Value = struct { } pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return ceilScalar(val, float_type, arena, target); + return ceilScalar(val, float_type, arena, mod); } - pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @ceil(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @ceil(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @ceil(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @ceil(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @ceil(f)); }, else => unreachable, @@ -5030,39 +4753,38 @@ pub const Value = struct { } pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return roundScalar(val, float_type, arena, target); + return roundScalar(val, float_type, arena, mod); } - pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @round(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @round(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @round(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @round(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @round(f)); }, else => unreachable, @@ -5070,39 +4792,38 @@ pub const Value = struct { } pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return truncScalar(val, float_type, arena, target); + return truncScalar(val, float_type, arena, mod); } - pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @trunc(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @trunc(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @trunc(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @trunc(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @trunc(f)); }, else => unreachable, @@ -5117,28 +4838,24 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var mulend1_buf: Value.ElemValueBuffer = undefined; - const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf); - var mulend2_buf: Value.ElemValueBuffer = undefined; - const mulend2_elem = mulend2.elemValueBuffer(mod, i, &mulend2_buf); - var addend_buf: Value.ElemValueBuffer = undefined; - const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf); + const mulend1_elem = try mulend1.elemValue(mod, i); + const mulend2_elem = try mulend2.elemValue(mod, i); + const addend_elem = try addend.elemValue(mod, i); scalar.* = try mulAddScalar( float_type.scalarType(mod), mulend1_elem, mulend2_elem, addend_elem, arena, - target, + mod, ); } return Value.Tag.aggregate.create(arena, result_data); } - return mulAddScalar(float_type, mulend1, mulend2, addend, arena, target); + return mulAddScalar(float_type, mulend1, mulend2, addend, arena, mod); } pub fn mulAddScalar( @@ -5147,37 +4864,38 @@ pub const Value = struct { mulend2: Value, addend: Value, arena: Allocator, - target: Target, + mod: *const Module, ) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const m1 = mulend1.toFloat(f16); - const m2 = mulend2.toFloat(f16); - const a = addend.toFloat(f16); + const m1 = mulend1.toFloat(f16, mod); + const m2 = mulend2.toFloat(f16, mod); + const a = addend.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @mulAdd(f16, m1, m2, a)); }, 32 => { - const m1 = mulend1.toFloat(f32); - const m2 = mulend2.toFloat(f32); - const a = addend.toFloat(f32); + const m1 = mulend1.toFloat(f32, mod); + const m2 = mulend2.toFloat(f32, mod); + const a = addend.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @mulAdd(f32, m1, m2, a)); }, 64 => { - const m1 = mulend1.toFloat(f64); - const m2 = mulend2.toFloat(f64); - const a = addend.toFloat(f64); + const m1 = mulend1.toFloat(f64, mod); + const m2 = mulend2.toFloat(f64, mod); + const a = addend.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @mulAdd(f64, m1, m2, a)); }, 80 => { - const m1 = mulend1.toFloat(f80); - const m2 = mulend2.toFloat(f80); - const a = addend.toFloat(f80); + const m1 = mulend1.toFloat(f80, mod); + const m2 = mulend2.toFloat(f80, mod); + const a = addend.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @mulAdd(f80, m1, m2, a)); }, 128 => { - const m1 = mulend1.toFloat(f128); - const m2 = mulend2.toFloat(f128); - const a = addend.toFloat(f128); + const m1 = mulend1.toFloat(f128, mod); + const m2 = mulend2.toFloat(f128, mod); + const a = addend.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @mulAdd(f128, m1, m2, a)); }, else => unreachable, @@ -5186,13 +4904,14 @@ pub const Value = struct { /// If the value is represented in-memory as a series of bytes that all /// have the same value, return that byte value, otherwise null. - pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value { + pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?Value { const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null; assert(abi_size >= 1); const byte_buffer = try mod.gpa.alloc(u8, abi_size); defer mod.gpa.free(byte_buffer); writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => return null, // TODO: The writeToMemory function was originally created for the purpose // of comptime pointer casting. 
However, it is now additionally being used @@ -5206,11 +4925,7 @@ pub const Value = struct { for (byte_buffer[1..]) |byte| { if (byte != first_byte) return null; } - value_buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = first_byte, - }; - return initPayload(&value_buffer.base); + return try mod.intValue(Type.u8, first_byte); } pub fn isGenericPoison(val: Value) bool { @@ -5226,30 +4941,6 @@ pub const Value = struct { data: u32, }; - pub const U64 = struct { - base: Payload, - data: u64, - }; - - pub const I64 = struct { - base: Payload, - data: i64, - }; - - pub const BigInt = struct { - base: Payload, - data: []const std.math.big.Limb, - - pub fn asBigInt(self: BigInt) BigIntConst { - const positive = switch (self.base.tag) { - .int_big_positive => true, - .int_big_negative => false, - else => unreachable, - }; - return BigIntConst{ .limbs = self.data, .positive = positive }; - } - }; - pub const Function = struct { base: Payload, data: *Module.Fn, @@ -5452,12 +5143,9 @@ pub const Value = struct { pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; - pub const zero = initTag(.zero); - pub const one = initTag(.one); - pub const negative_one: Value = .{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &negative_one_payload.base }, - }; + pub const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; + pub const one: Value = .{ .ip_index = .one, .legacy = undefined }; + pub const negative_one: Value = .{ .ip_index = .negative_one, .legacy = undefined }; pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; @@ -5515,8 +5203,3 @@ pub const Value = struct { } } }; - -var negative_one_payload: Value.Payload.I64 = .{ - .base = .{ .tag = .int_i64 }, - .data = -1, -}; From 6c713b40f76a2df9c06f875026f628a99a5088f0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 6 May 2023 19:34:35 -0700 Subject: [PATCH 040/205] InternPool: add an encoding for arrays with sentinels --- src/InternPool.zig | 63 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 55 insertions(+), 8 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index d2f3bf81fe97..309d76c0935a 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -666,9 +666,12 @@ pub const Tag = enum(u8) { /// An integer type. /// data is number of bits type_int_unsigned, + /// An array type whose length requires 64 bits or which has a sentinel. + /// data is payload to Array. + type_array_big, /// An array type that has no sentinel and whose length fits in 32 bits. /// data is payload to Vector. - type_array, + type_array_small, /// A vector type. /// data is payload to Vector. 
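// A minimal sketch (editorial, not part of this change) of how a 64-bit
// array length round-trips through the `type_array_big` encoding just
// introduced: the u64 is split into the two u32 halves stored in `Array`,
// and `Array.getLength` (defined further down) reassembles them via the
// packed `Length` struct:
//
//     const len: u64 = (1 << 40) + 3; // too large for type_array_small
//     const halves = @bitCast(Array.Length, len);
//     const arr = Array{ .len0 = halves.len0, .len1 = halves.len1, .child = .u8_type, .sentinel = .none };
//     assert(arr.getLength() == len);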
type_vector, @@ -862,6 +865,25 @@ pub const Vector = struct { child: Index, }; +pub const Array = struct { + len0: u32, + len1: u32, + child: Index, + sentinel: Index, + + pub const Length = packed struct(u64) { + len0: u32, + len1: u32, + }; + + pub fn getLength(a: Array) u64 { + return @bitCast(u64, Length{ + .len0 = a.len0, + .len1 = a.len1, + }); + } +}; + pub const ErrorUnion = struct { error_set_type: Index, payload_type: Index, @@ -958,7 +980,15 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .bits = @intCast(u16, data), }, }, - .type_array => { + .type_array_big => { + const array_info = ip.extraData(Array, data); + return .{ .array_type = .{ + .len = array_info.getLength(), + .child = array_info.child, + .sentinel = array_info.sentinel, + } }; + }, + .type_array_small => { const array_info = ip.extraData(Vector, data); return .{ .array_type = .{ .len = array_info.len, @@ -1094,13 +1124,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, .array_type => |array_type| { - const len = @intCast(u32, array_type.len); // TODO have a big_array encoding - assert(array_type.sentinel == .none); // TODO have a sentinel_array encoding + assert(array_type.child != .none); + + if (std.math.cast(u32, array_type.len)) |len| { + if (array_type.sentinel == .none) { + ip.items.appendAssumeCapacity(.{ + .tag = .type_array_small, + .data = try ip.addExtra(gpa, Vector{ + .len = len, + .child = array_type.child, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + } + + const length = @bitCast(Array.Length, array_type.len); ip.items.appendAssumeCapacity(.{ - .tag = .type_array, - .data = try ip.addExtra(gpa, Vector{ - .len = len, + .tag = .type_array_big, + .data = try ip.addExtra(gpa, Array{ + .len0 = length.len0, + .len1 = length.len1, .child = array_type.child, + .sentinel = array_type.sentinel, }), }); }, @@ -1488,7 +1534,8 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { .type_int_signed => 0, .type_int_unsigned => 0, - .type_array => @sizeOf(Vector), + .type_array_small => @sizeOf(Vector), + .type_array_big => @sizeOf(Array), .type_vector => @sizeOf(Vector), .type_pointer => @sizeOf(Pointer), .type_slice => 0, From 9d9e1a29911c0ecf92eb9db10027cf691cd4b07e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 6 May 2023 20:00:47 -0700 Subject: [PATCH 041/205] InternPool: implement indexToKey and equality for int values --- src/InternPool.zig | 67 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 55 insertions(+), 12 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 309d76c0935a..3f6e4793b8fc 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -5,6 +5,8 @@ items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, /// On 32-bit systems, this array is ignored and extra is used for everything. /// On 64-bit systems, this array is used for big integers and associated metadata. +/// Use the helper methods instead of accessing this directly in order to not +/// violate the above mechanism. 
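// Illustrative note (an assumption based on the helpers added later in this
// file, not a line from this diff): callers never see which backing array
// holds the limbs. For example:
//
//     const key = ip.indexToKey(big_index); // big_index: a hypothetical interned big int
//     const limbs = key.int.storage.big_int.limbs; // points into `limbs` on 64-bit hosts, `extra` on 32-bit hosts
//
// `indexToKeyBigInt` and `limbSlice` below hide that distinction.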
limbs: std.ArrayListUnmanaged(u64) = .{}, const std = @import("std"); @@ -12,6 +14,7 @@ const Allocator = std.mem.Allocator; const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; +const Limb = std.math.big.Limb; const InternPool = @This(); const DeclIndex = enum(u32) { _ }; @@ -233,9 +236,23 @@ pub const Key = union(enum) { .int => |a_info| { const b_info = b.int; - _ = a_info; - _ = b_info; - @panic("TODO"); + return switch (a_info.storage) { + .u64 => |aa| switch (b_info.storage) { + .u64 => |bb| aa == bb, + .i64 => |bb| aa == bb, + .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + }, + .i64 => |aa| switch (b_info.storage) { + .u64 => |bb| aa == bb, + .i64 => |bb| aa == bb, + .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + }, + .big_int => |aa| switch (b_info.storage) { + .u64 => |bb| aa.orderAgainstScalar(bb) == .eq, + .i64 => |bb| aa.orderAgainstScalar(bb) == .eq, + .big_int => |bb| aa.eq(bb), + }, + }; }, .enum_tag => |a_info| { @@ -1056,8 +1073,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .ty = .comptime_int_type, .storage = .{ .i64 = @bitCast(i32, data) }, } }, - .int_positive => @panic("TODO"), - .int_negative => @panic("TODO"), + .int_positive => indexToKeyBigInt(ip, data, true), + .int_negative => indexToKeyBigInt(ip, data, false), .enum_tag_positive => @panic("TODO"), .enum_tag_negative => @panic("TODO"), .float_f32 => @panic("TODO"), @@ -1068,6 +1085,17 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }; } +fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key { + const int_info = ip.limbData(Int, limb_index); + return .{ .int = .{ + .ty = int_info.ty, + .storage = .{ .big_int = .{ + .limbs = ip.limbSlice(Int, limb_index, int_info.limbs_len), + .positive = positive, + } }, + } }; +} + pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); @@ -1293,7 +1321,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { try addInt(ip, gpa, int.ty, tag, big_int.limbs); }, inline .i64, .u64 => |x| { - var buf: [2]usize = undefined; + var buf: [2]Limb = undefined; const big_int = BigIntMutable.init(&buf, x).toConst(); const tag: Tag = if (big_int.positive) .int_positive else .int_negative; try addInt(ip, gpa, int.ty, tag, big_int.limbs); @@ -1324,7 +1352,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } -fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const usize) !void { +fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { const limbs_len = @intCast(u32, limbs.len); try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); ip.items.appendAssumeCapacity(.{ @@ -1360,7 +1388,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { } fn reserveLimbs(ip: *InternPool, gpa: Allocator, n: usize) !void { - switch (@sizeOf(usize)) { + switch (@sizeOf(Limb)) { @sizeOf(u32) => try ip.extra.ensureUnusedCapacity(gpa, n), @sizeOf(u64) => try ip.limbs.ensureUnusedCapacity(gpa, n), else => @compileError("unsupported host"), @@ -1368,7 +1396,7 @@ fn reserveLimbs(ip: *InternPool, gpa: Allocator, n: usize) !void { } fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { - switch (@sizeOf(usize)) { + switch (@sizeOf(Limb)) { @sizeOf(u32) => return 
addExtraAssumeCapacity(ip, extra), @sizeOf(u64) => {}, else => @compileError("unsupported host"), @@ -1389,8 +1417,8 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { return result; } -fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const usize) void { - switch (@sizeOf(usize)) { +fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { + switch (@sizeOf(Limb)) { @sizeOf(u32) => ip.extra.appendSliceAssumeCapacity(limbs), @sizeOf(u64) => ip.limbs.appendSliceAssumeCapacity(limbs), else => @compileError("unsupported host"), @@ -1416,7 +1444,7 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T { /// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. fn limbData(ip: InternPool, comptime T: type, index: usize) T { - switch (@sizeOf(usize)) { + switch (@sizeOf(Limb)) { @sizeOf(u32) => return extraData(ip, T, index), @sizeOf(u64) => {}, else => @compileError("unsupported host"), @@ -1438,6 +1466,21 @@ fn limbData(ip: InternPool, comptime T: type, index: usize) T { return result; } +fn limbSlice(ip: InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { + const field_count = @typeInfo(S).Struct.fields.len; + switch (@sizeOf(Limb)) { + @sizeOf(u32) => { + const start = limb_index + field_count; + return ip.extra.items[start..][0..len]; + }, + @sizeOf(u64) => { + const start = limb_index + @divExact(field_count, 2); + return ip.limbs.items[start..][0..len]; + }, + else => @compileError("unsupported host"), + } +} + test "basic usage" { const gpa = std.testing.allocator; From c1ca16d779a3f3201a5a35a88eff188290b2091a Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 7 May 2023 15:23:12 +0100 Subject: [PATCH 042/205] wip: progress towards compiling tests --- src/Module.zig | 17 ++++-- src/Sema.zig | 99 ++++++++++++++++++++--------------- src/codegen/llvm.zig | 2 +- src/type.zig | 121 +++++++++++++++++++++++++++++-------------- src/value.zig | 108 ++++++++++++++++++++------------------ 5 files changed, 213 insertions(+), 134 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 9315c9efa718..f56235c93370 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -934,9 +934,12 @@ pub const Decl = struct { pub fn isExtern(decl: Decl) bool { assert(decl.has_tv); - return switch (decl.val.tag()) { - .extern_fn => true, - .variable => decl.val.castTag(.variable).?.data.init.ip_index == .unreachable_value, + return switch (decl.val.ip_index) { + .none => switch (decl.val.tag()) { + .extern_fn => true, + .variable => decl.val.castTag(.variable).?.data.init.ip_index == .unreachable_value, + else => false, + }, else => false, }; } @@ -6833,6 +6836,10 @@ pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allo } pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type { + if (std.debug.runtime_safety and info.sentinel != .none) { + const sent_ty = mod.intern_pool.indexToKey(info.sentinel).typeOf(); + assert(sent_ty == info.child); + } const i = try intern(mod, .{ .array_type = info }); return i.toType(); } @@ -6848,6 +6855,10 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error! 
} pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { + if (std.debug.runtime_safety and info.sentinel != .none) { + const sent_ty = mod.intern_pool.indexToKey(info.sentinel).typeOf(); + assert(sent_ty == info.elem_type); + } const i = try intern(mod, .{ .ptr_type = info }); return i.toType(); } diff --git a/src/Sema.zig b/src/Sema.zig index 3aa845c10b8f..e92c6266910a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5146,7 +5146,7 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air defer anon_decl.deinit(); const decl_index = try anon_decl.finish( - try Type.array(anon_decl.arena(), gop.key_ptr.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), gop.key_ptr.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), 0, // default alignment ); @@ -15567,7 +15567,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. => {}, } const val = try ty.lazyAbiSize(mod, sema.arena); - if (val.tag() == .lazy_size) { + if (val.ip_index == .none and val.tag() == .lazy_size) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -16006,8 +16006,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai sema.arena, @enumToInt(info.signedness), ); - // bits: comptime_int, - field_values[1] = try mod.intValue(Type.comptime_int, info.bits); + // bits: u16, + field_values[1] = try mod.intValue(Type.u16, info.bits); return sema.addConstant( type_info_ty, @@ -16019,8 +16019,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Float => { const field_values = try sema.arena.alloc(Value, 1); - // bits: comptime_int, - field_values[0] = try mod.intValue(Type.comptime_int, ty.bitSize(mod)); + // bits: u16, + field_values[0] = try mod.intValue(Type.u16, ty.bitSize(mod)); return sema.addConstant( type_info_ty, @@ -25957,7 +25957,21 @@ fn coerceExtra( if (!opts.report_err) return error.NotCoercible; return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }); } - return try sema.addConstant(dest_ty, val); + const key = mod.intern_pool.indexToKey(val.ip_index); + // If the int is represented as a bigint, copy it so we can safely pass it to `mod.intern` + const int_storage: InternPool.Key.Int.Storage = switch (key.int.storage) { + .u64 => |x| .{ .u64 = x }, + .i64 => |x| .{ .i64 = x }, + .big_int => |big_int| .{ .big_int = .{ + .limbs = try sema.arena.dupe(std.math.big.Limb, big_int.limbs), + .positive = big_int.positive, + } }, + }; + const new_val = try mod.intern(.{ .int = .{ + .ty = dest_ty.ip_index, + .storage = int_storage, + } }); + return try sema.addConstant(dest_ty, new_val.toValue()); } if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; @@ -31061,39 +31075,42 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileE /// Make it so that calling hash() and eql() on `val` will not assert due /// to a type not having its layout resolved. 
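// For example (illustrative, not from this diff): a value such as one
// produced for `@sizeOf` of a struct whose layout is not yet resolved would
// make hash()/eql() assert, so it is resolved eagerly first:
//
//     const lazy = try Value.Tag.lazy_size.create(sema.arena, some_struct_ty); // hypothetical type
//     try sema.resolveLazyValue(lazy); // forces some_struct_ty's layout, so hashing is safe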
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { - switch (val.tag()) { - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return sema.resolveTypeLayout(ty); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return sema.resolveTypeLayout(ty); - }, - .comptime_field_ptr => { - const field_ptr = val.castTag(.comptime_field_ptr).?.data; - return sema.resolveLazyValue(field_ptr.field_val); - }, - .eu_payload, - .opt_payload, - => { - const sub_val = val.cast(Value.Payload.SubValue).?.data; - return sema.resolveLazyValue(sub_val); - }, - .@"union" => { - const union_val = val.castTag(.@"union").?.data; - return sema.resolveLazyValue(union_val.val); - }, - .aggregate => { - const aggregate = val.castTag(.aggregate).?.data; - for (aggregate) |elem_val| { - try sema.resolveLazyValue(elem_val); - } - }, - .slice => { - const slice = val.castTag(.slice).?.data; - try sema.resolveLazyValue(slice.ptr); - return sema.resolveLazyValue(slice.len); + switch (val.ip_index) { + .none => switch (val.tag()) { + .lazy_align => { + const ty = val.castTag(.lazy_align).?.data; + return sema.resolveTypeLayout(ty); + }, + .lazy_size => { + const ty = val.castTag(.lazy_size).?.data; + return sema.resolveTypeLayout(ty); + }, + .comptime_field_ptr => { + const field_ptr = val.castTag(.comptime_field_ptr).?.data; + return sema.resolveLazyValue(field_ptr.field_val); + }, + .eu_payload, + .opt_payload, + => { + const sub_val = val.cast(Value.Payload.SubValue).?.data; + return sema.resolveLazyValue(sub_val); + }, + .@"union" => { + const union_val = val.castTag(.@"union").?.data; + return sema.resolveLazyValue(union_val.val); + }, + .aggregate => { + const aggregate = val.castTag(.aggregate).?.data; + for (aggregate) |elem_val| { + try sema.resolveLazyValue(elem_val); + } + }, + .slice => { + const slice = val.castTag(.slice).?.data; + try sema.resolveLazyValue(slice.ptr); + return sema.resolveLazyValue(slice.len); + }, + else => return, }, else => return, } @@ -31200,7 +31217,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { }; for (struct_obj.fields.values(), 0..) 
|field, i| { - optimized_order[i] = if (!(try sema.typeHasRuntimeBits(field.ty))) + optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) @intCast(u32, i) else Module.Struct.omitted_field; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 9b62c5448da7..9d8c3edaf5c6 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2481,7 +2481,7 @@ pub const DeclGen = struct { log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(), }); - assert(decl.val.tag() != .function); + assert(decl.val.ip_index != .none or decl.val.tag() != .function); if (decl.val.castTag(.extern_fn)) |extern_fn| { _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); } else { diff --git a/src/type.zig b/src/type.zig index 5b18245323b5..e60d21608556 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2471,7 +2471,7 @@ pub const Type = struct { pub fn lazyAbiSize(ty: Type, mod: *Module, arena: Allocator) !Value { switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, - .scalar => |x| return mod.intValue(ty, x), + .scalar => |x| return mod.intValue(Type.comptime_int, x), } } @@ -2504,8 +2504,20 @@ pub const Type = struct { if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; }, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + }, + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + switch (try array_type.child.toType().abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| return .{ .scalar = len * elem_size }, + .val => switch (strat) { + .sema, .eager => unreachable, + .lazy => |arena| return .{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + }, + } + }, .vector_type => |vector_type| { const opt_sema = switch (strat) { .sema => |sema| sema, @@ -2528,7 +2540,7 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = result }; }, - .opt_type => @panic("TODO"), + .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .bool, @@ -2698,39 +2710,7 @@ pub const Type = struct { .error_set_single, => return AbiSizeAdvanced{ .scalar = 2 }, - .optional => { - const child_type = ty.optionalChild(mod); - - if (child_type.isNoReturn()) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - - if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, - else => |e| return e, - })) return AbiSizeAdvanced{ .scalar = 1 }; - - if (ty.optionalReprIsPayload(mod)) { - return abiSizeAdvanced(child_type, mod, strat); - } - - const payload_size = switch (try child_type.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - }; - - // Optional types are represented as a struct with the child type as the first - // field and a boolean as the second. Since the child type's abi alignment is - // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal - // to the child type's ABI alignment. 
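// Worked example (illustrative): for `?u32` on a typical target,
// payload_size = 4 and abiAlignment(u32) = 4, so the optional's ABI size is
// 4 + 4 = 8 bytes: a u32 payload, one bool byte, and three padding bytes.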
- return AbiSizeAdvanced{ - .scalar = child_type.abiAlignment(mod) + payload_size, - }; - }, + .optional => return ty.abiSizeAdvancedOptional(mod, strat), .error_union => { // This code needs to be kept in sync with the equivalent switch prong @@ -2791,6 +2771,44 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) }; } + fn abiSizeAdvancedOptional( + ty: Type, + mod: *const Module, + strat: AbiAlignmentAdvancedStrat, + ) Module.CompileError!AbiSizeAdvanced { + const child_ty = ty.optionalChild(mod); + + if (child_ty.isNoReturn()) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + + if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + else => |e| return e, + })) return AbiSizeAdvanced{ .scalar = 1 }; + + if (ty.optionalReprIsPayload(mod)) { + return abiSizeAdvanced(child_ty, mod, strat); + } + + const payload_size = switch (try child_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + }, + }; + + // Optional types are represented as a struct with the child type as the first + // field and a boolean as the second. Since the child type's abi alignment is + // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal + // to the child type's ABI alignment. + return AbiSizeAdvanced{ + .scalar = child_ty.abiAlignment(mod) + payload_size, + }; + } + fn intAbiSize(bits: u16, target: Target) u64 { const alignment = intAbiAlignment(bits, target); return std.mem.alignForwardGeneric(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment); @@ -2819,8 +2837,19 @@ pub const Type = struct { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type.bits, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth() * 2, + }, + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + if (len == 0) return 0; + const elem_ty = array_type.child.toType(); + const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, .vector_type => |vector_type| { const child_ty = vector_type.child.toType(); const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); @@ -3208,6 +3237,20 @@ pub const Type = struct { /// See also `isPtrLikeOptional`. 
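// Illustrative cases for the intern-pool branch added below, read directly
// off the pointer rules it encodes:
//
//     ?*u8            => true  (a non-null pointer doubles as the payload)
//     ?*allowzero u8  => false (zero is a valid payload value)
//     ?[*c]u8         => false (C pointers already admit null)
//     ?anyerror       => true  (error sets reserve 0 for "no error")
//     ?u32            => false (needs a separate null flag)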
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { + .Pointer => { + const info = child.toType().ptrInfo(mod); + switch (info.size) { + .C => return false, + else => return !info.@"allowzero", + } + }, + .ErrorSet => true, + else => false, + }, + else => false, + }; switch (ty.tag()) { .optional => { const child_ty = ty.castTag(.optional).?.data; diff --git a/src/value.zig b/src/value.zig index c0ea9e149f13..eced9ba34524 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1832,35 +1832,37 @@ pub const Value = struct { } } - switch (lhs.tag()) { - .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema), - .aggregate => { - for (lhs.castTag(.aggregate).?.data) |elem_val| { - if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false; - } - return true; - }, - .empty_array => return true, - .str_lit => { - const str_lit = lhs.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - for (bytes) |byte| { - if (!std.math.compare(byte, op, 0)) return false; - } - return true; - }, - .bytes => { - const bytes = lhs.castTag(.bytes).?.data; - for (bytes) |byte| { - if (!std.math.compare(byte, op, 0)) return false; - } - return true; + switch (lhs.ip_index) { + .none => switch (lhs.tag()) { + .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema), + .aggregate => { + for (lhs.castTag(.aggregate).?.data) |elem_val| { + if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false; + } + return true; + }, + .str_lit => { + const str_lit = lhs.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + for (bytes) |byte| { + if (!std.math.compare(byte, op, 0)) return false; + } + return true; + }, + .bytes => { + const bytes = lhs.castTag(.bytes).?.data; + for (bytes) |byte| { + if (!std.math.compare(byte, op, 0)) return false; + } + return true; + }, + .float_16 => if (std.math.isNan(lhs.castTag(.float_16).?.data)) return op == .neq, + .float_32 => if (std.math.isNan(lhs.castTag(.float_32).?.data)) return op == .neq, + .float_64 => if (std.math.isNan(lhs.castTag(.float_64).?.data)) return op == .neq, + .float_80 => if (std.math.isNan(lhs.castTag(.float_80).?.data)) return op == .neq, + .float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op == .neq, + else => {}, }, - .float_16 => if (std.math.isNan(lhs.castTag(.float_16).?.data)) return op == .neq, - .float_32 => if (std.math.isNan(lhs.castTag(.float_32).?.data)) return op == .neq, - .float_64 => if (std.math.isNan(lhs.castTag(.float_64).?.data)) return op == .neq, - .float_80 => if (std.math.isNan(lhs.castTag(.float_80).?.data)) return op == .neq, - .float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op == .neq, else => {}, } return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); @@ -2404,37 +2406,43 @@ pub const Value = struct { }; pub fn isComptimeMutablePtr(val: Value) bool { - return switch (val.tag()) { - .decl_ref_mut, .comptime_field_ptr => true, - .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr), - .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr), - .eu_payload_ptr => 
isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr), - .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr), - .slice => isComptimeMutablePtr(val.castTag(.slice).?.data.ptr), + return switch (val.ip_index) { + .none => switch (val.tag()) { + .decl_ref_mut, .comptime_field_ptr => true, + .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr), + .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr), + .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr), + .opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr), + .slice => isComptimeMutablePtr(val.castTag(.slice).?.data.ptr), + else => false, + }, else => false, }; } pub fn canMutateComptimeVarState(val: Value) bool { if (val.isComptimeMutablePtr()) return true; - switch (val.tag()) { - .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(), - .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), - .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(), - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), - .aggregate => { - const fields = val.castTag(.aggregate).?.data; - for (fields) |field| { - if (field.canMutateComptimeVarState()) return true; - } - return false; + return switch (val.ip_index) { + .none => switch (val.tag()) { + .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(), + .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(), + .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), + .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(), + .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), + .aggregate => { + const fields = val.castTag(.aggregate).?.data; + for (fields) |field| { + if (field.canMutateComptimeVarState()) return true; + } + return false; + }, + .@"union" => return val.cast(Payload.Union).?.data.val.canMutateComptimeVarState(), + .slice => return val.castTag(.slice).?.data.ptr.canMutateComptimeVarState(), + else => return false, }, - .@"union" => return val.cast(Payload.Union).?.data.val.canMutateComptimeVarState(), - .slice => return val.castTag(.slice).?.data.ptr.canMutateComptimeVarState(), else => return false, - } + }; } /// Gets the decl referenced by this pointer. If the pointer does not point From 4c3c605e5f53c91430efac821ce1b863cbb5bf06 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 12:53:14 -0700 Subject: [PATCH 043/205] InternPool: add getCoercedInt to avoid copy in Sema --- src/InternPool.zig | 20 ++++++++++++++++++++ src/Sema.zig | 15 +-------------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 3f6e4793b8fc..f6d594cbbbf3 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1535,6 +1535,26 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index { } } +/// Given an existing integer value, returns the same numerical value but with +/// the supplied type. 
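// Hypothetical usage of the function added below (the indices are
// placeholders, not values from this diff):
//
//     const small = try ip.get(gpa, .{ .int = .{ .ty = .comptime_int_type, .storage = .{ .u64 = 42 } } });
//     const as_u8 = try ip.getCoercedInt(gpa, small, .u8_type);
//     // as_u8 represents the same numerical 42, but with type u8.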
+pub fn getCoercedInt(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { + const key = ip.indexToKey(val); + // The key cannot be passed directly to `get`, otherwise in the case of + // big_int storage, the limbs would be invalidated before they are read. + // Here we pre-reserve the limbs to ensure that the logic in `addInt` will + // not use an invalidated limbs pointer. + switch (key.int.storage) { + .u64, .i64 => {}, + .big_int => |big_int| { + try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); + }, + } + return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = key.int.storage, + } }); +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } diff --git a/src/Sema.zig b/src/Sema.zig index e92c6266910a..dc5bb1cdea5f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -25957,20 +25957,7 @@ fn coerceExtra( if (!opts.report_err) return error.NotCoercible; return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }); } - const key = mod.intern_pool.indexToKey(val.ip_index); - // If the int is represented as a bigint, copy it so we can safely pass it to `mod.intern` - const int_storage: InternPool.Key.Int.Storage = switch (key.int.storage) { - .u64 => |x| .{ .u64 = x }, - .i64 => |x| .{ .i64 = x }, - .big_int => |big_int| .{ .big_int = .{ - .limbs = try sema.arena.dupe(std.math.big.Limb, big_int.limbs), - .positive = big_int.positive, - } }, - }; - const new_val = try mod.intern(.{ .int = .{ - .ty = dest_ty.ip_index, - .storage = int_storage, - } }); + const new_val = try mod.intern_pool.getCoercedInt(sema.gpa, val.ip_index, dest_ty.ip_index); return try sema.addConstant(dest_ty, new_val.toValue()); } if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { From 2ffef605c75b62ba49e21bfb3256537a4a2c0a5e Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 7 May 2023 22:12:04 +0100 Subject: [PATCH 044/205] Replace uses of Value.zero, Value.one, Value.negative_one This is a bit nasty, mainly because Type.onePossibleValue is now errorable, which is a quite viral change. --- src/Air.zig | 2 +- src/Module.zig | 12 +- src/Sema.zig | 257 +++++++++++++++++++++-------------- src/TypedValue.zig | 13 +- src/arch/aarch64/CodeGen.zig | 4 +- src/arch/arm/CodeGen.zig | 4 +- src/arch/riscv64/CodeGen.zig | 4 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/wasm/CodeGen.zig | 10 +- src/arch/x86_64/CodeGen.zig | 12 +- src/codegen.zig | 8 +- src/codegen/c.zig | 64 ++++----- src/codegen/llvm.zig | 18 +-- src/codegen/spirv.zig | 10 +- src/type.zig | 44 +++--- src/value.zig | 43 +++--- 16 files changed, 286 insertions(+), 223 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 549583e69790..8059b9e57f07 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1485,7 +1485,7 @@ pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { } /// Returns `null` if runtime-known. 
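// Sketch of the knock-on effect of this signature change (not a line from
// this diff): call sites move from `if (air.value(inst, mod)) |val| { ... }`
// to `if (try air.value(inst, mod)) |val| { ... }`, because resolving a
// value may now intern and can therefore fail with error.OutOfMemory.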
-pub fn value(air: Air, inst: Inst.Ref, mod: *const Module) ?Value { +pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const ref_int = @enumToInt(inst); if (ref_int < ref_start_index) { const ip_index = @intToEnum(InternPool.Index, ref_int); diff --git a/src/Module.zig b/src/Module.zig index f56235c93370..3f5dc8039e10 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5750,7 +5750,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const arg_val = if (!arg_tv.val.isGenericPoison()) arg_tv.val - else if (arg_tv.ty.onePossibleValue(mod)) |opv| + else if (try arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -6887,6 +6887,16 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { } pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + if (std.debug.runtime_safety) { + // TODO: decide if this also works for ABI int types like enums + const tag = ty.zigTypeTag(mod); + assert(tag == .Int or tag == .ComptimeInt); + } + if (@TypeOf(x) == comptime_int) { + if (comptime std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); + if (comptime std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); + @compileError("Out-of-range comptime_int passed to Module.intValue"); + } if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); var limbs_buffer: [4]usize = undefined; diff --git a/src/Sema.zig b/src/Sema.zig index dc5bb1cdea5f..9b1da7498205 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3062,9 +3062,9 @@ fn zirEnumDecl( } } else if (any_values) { const tag_val = if (last_tag_val) |val| - try sema.intAdd(val, Value.one, enum_obj.tag_ty) + try sema.intAdd(val, try mod.intValue(enum_obj.tag_ty, 1), enum_obj.tag_ty) else - Value.zero; + try mod.intValue(enum_obj.tag_ty, 0); last_tag_val = tag_val; const copied_tag_val = try tag_val.copy(decl_arena_allocator); const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ @@ -4709,7 +4709,7 @@ fn zirValidateArrayInit( // Determine whether the value stored to this pointer is comptime-known. 
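// For instance (illustrative, not from this diff): when initializing a tuple
// type like `struct { u32, comptime u8 = 42 }`, element 1 is fixed at 42 by
// the type itself, so structFieldValueComptime supplies it directly and only
// element 0's store needs further analysis.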
if (array_ty.isTuple()) { - if (array_ty.structFieldValueComptime(mod, i)) |opv| { + if (try array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv; continue; } @@ -8132,7 +8132,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (!op_ty.isAnyError()) { const names = op_ty.errorSetNames(); switch (names.len) { - 0 => return sema.addConstant(Type.err_int, Value.zero), + 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?), else => {}, } @@ -8167,7 +8167,7 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand); - const zero_val = try sema.addConstant(Type.err_int, Value.zero); + const zero_val = try sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)); const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val); const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero); try sema.addSafetyCheck(block, ok, .invalid_error_code); @@ -9656,7 +9656,7 @@ fn intCast( if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ @@ -9665,7 +9665,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), Value.zero); + const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9705,8 +9705,9 @@ fn intCast( // If the destination type is signed, then we need to double its // range to account for negative values. 
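// Worked example (illustrative): for a cast to i8, dest_max_val = 127;
// shifting left by one gives 254 and adding one gives 255, so the unsigned
// range comparison below covers the 256 two's-complement bit patterns that
// are representable in i8.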
const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod); - break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty); + const one = try mod.intValue(unsigned_operand_ty, 1); + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, sema.mod); + break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty); } else dest_max_val; const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); @@ -9747,7 +9748,7 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -9759,7 +9760,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(operand_ty, Value.zero); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); break :ok is_in_range; }; @@ -11250,7 +11251,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({ // Previous validation has resolved any possible lazy values. - item = try sema.intAddScalar(item, Value.one, operand_ty); + item = try sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty); }) { cases_len += 1; @@ -11696,7 +11697,7 @@ const RangeSetUnhandledIterator = struct { fn next(it: *RangeSetUnhandledIterator) !?Value { while (it.range_i < it.ranges.len) : (it.range_i += 1) { if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { @@ -11705,7 +11706,7 @@ const RangeSetUnhandledIterator = struct { it.cur = it.ranges[it.range_i].last; } if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) { @@ -12150,7 +12151,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. 
embed_file.owner_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), embed_file.bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), embed_file.bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), 0, // default alignment ); @@ -12235,14 +12236,14 @@ fn zirShl( var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(sema.mod, i); - if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { + if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { + } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -12348,7 +12349,7 @@ fn zirShl( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .shl_overflow); @@ -12417,14 +12418,14 @@ fn zirShr( var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(sema.mod, i); - if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { + if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { + } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -13156,9 +13157,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) else - try sema.resolveInst(.zero); + try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13180,9 +13181,9 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) else - try sema.resolveInst(.zero); + try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13293,9 +13294,14 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13318,7 +13324,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13427,9 +13433,14 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.failWithUseOfUndef(block, rhs_src); } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13507,8 +13518,13 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else ok: { const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (resolved_type.zigTypeTag(mod) == .Vector) { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ @@ -13519,7 +13535,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, }); } else { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero); break :ok is_in_range; } @@ -13592,9 +13608,14 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if 
(!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13612,7 +13633,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13708,9 +13729,14 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13727,7 +13753,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13862,8 +13888,9 @@ fn addDivByZeroSafety( if (maybe_rhs_val != null) return; const mod = sema.mod; + const scalar_zero = if (is_int) try mod.intValue(resolved_type.scalarType(mod), 0) else Value.float_zero; // TODO migrate to internpool const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ @@ -13874,7 +13901,7 @@ fn addDivByZeroSafety( } }, }); } else ok: { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero); }; try sema.addSafetyCheck(block, ok, .divide_by_zero); @@ -13946,9 +13973,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.failWithUseOfUndef(block, lhs_src);
}
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
+ const scalar_zero = switch (scalar_tag) {
+ .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool
+ .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+ else => unreachable,
+ };
const zero_val = if (is_vector) b: {
- break :b try Value.Tag.repeated.create(sema.arena, Value.zero);
- } else Value.zero;
+ break :b try Value.Tag.repeated.create(sema.arena, scalar_zero);
+ } else scalar_zero;
return sema.addConstant(resolved_type, zero_val);
}
} else if (lhs_scalar_ty.isSignedInt(mod)) {
@@ -14325,6 +14357,7 @@ fn zirOverflowArithmetic(
wrapped: Value = Value.@"unreachable",
overflow_bit: Value,
} = result: {
+ const zero = try mod.intValue(dest_ty.scalarType(mod), 0);
switch (zir_tag) {
.add_with_overflow => {
// If either of the arguments is zero, `false` is returned and the other is stored
// in the result, no overflow occurred.
// Otherwise, if either of the arguments is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
@@ -14358,7 +14391,7 @@ fn zirOverflowArithmetic(
if (rhs_val.isUndef()) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
} else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
@@ -14373,12 +14406,13 @@ fn zirOverflowArithmetic(
// If either of the arguments is zero, the result is zero and no overflow occurred.
// If either of the arguments is one, the result is the other and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
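The folding rules in the comment above mirror the runtime builtin. A minimal
standalone sketch of the same identities (not part of this patch; assumes the
0.11-era `@mulWithOverflow` that returns a result/overflow-bit tuple):

    const std = @import("std");

    test "mul_with_overflow identities" {
        const a: u8 = 200;
        // A zero operand yields zero with no overflow.
        const r0 = @mulWithOverflow(a, @as(u8, 0));
        try std.testing.expectEqual(@as(u8, 0), r0[0]);
        try std.testing.expectEqual(@as(u1, 0), r0[1]);
        // A one operand yields the other operand with no overflow.
        const r1 = @mulWithOverflow(a, @as(u8, 1));
        try std.testing.expectEqual(a, r1[0]);
        try std.testing.expectEqual(@as(u1, 0), r1[1]);
    }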
+ const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef()) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
- } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
+ } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
}
}
}
@@ -14386,9 +14420,9 @@ fn zirOverflowArithmetic(
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef()) {
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
- } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
+ } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
}
@@ -14410,12 +14444,12 @@ fn zirOverflowArithmetic(
// Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| {
if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_rhs_val) |rhs_val| {
if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
- break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+ break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
@@ -14766,6 +14800,11 @@ fn analyzeArithmetic(
// If either of the operands are inf, and the other operand is zero,
// the result is nan.
// If either of the operands are nan, the result is nan.
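The float rules above are ordinary IEEE-754 behavior. A tiny illustration
(again not patch code):

    const std = @import("std");

    test "float mul nan rules" {
        const inf = std.math.inf(f64);
        // 0 * inf is nan, and nan propagates through multiplication.
        try std.testing.expect(std.math.isNan(@as(f64, 0.0) * inf));
        try std.testing.expect(std.math.isNan(std.math.nan(f64) * 2.0));
    }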
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.isNan()) { @@ -14783,11 +14822,11 @@ fn analyzeArithmetic( break :lz; } const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14813,11 +14852,11 @@ fn analyzeArithmetic( break :rz; } const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14849,15 +14888,20 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14869,11 +14913,11 @@ fn analyzeArithmetic( } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14892,15 +14936,20 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14911,11 +14960,11 @@ fn analyzeArithmetic( } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14968,7 +15017,7 @@ fn analyzeArithmetic( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .integer_overflow); @@ -15785,7 +15834,7 @@ fn zirBuiltinSrc( const name = std.mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len - 1, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); @@ -15798,7 +15847,7 @@ fn zirBuiltinSrc( // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), name.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), 0, // default alignment ); @@ -16148,7 +16197,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -16256,7 +16305,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16344,7 +16393,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16454,7 +16503,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16496,7 +16545,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16692,7 +16741,7 @@ fn typeInfoNamespaceDecls( defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -17884,7 +17933,7 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (!is_packed) if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; @@ -18544,8 +18593,8 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(Type.u1); - if (val.toBool(mod)) return sema.addConstant(Type.u1, Value.one); - return sema.addConstant(Type.u1, Value.zero); + if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); + return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } return block.addUnOp(.bool_to_int, operand); } @@ -19761,7 +19810,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -19804,17 +19853,17 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.requireRuntimeBlock(block, inst_data.src(), operand_src); if (dest_ty.intInfo(mod).bits == 0) { if (block.wantSafety()) { - const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero)); + const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0))); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } - return sema.addConstant(dest_ty, Value.zero); + return sema.addConstant(dest_ty, try mod.intValue(dest_ty, 0)); } const result = try block.addTyOp(if (block.float_mode == .Optimized) .float_to_int_optimized else .float_to_int, dest_ty, operand); if (block.wantSafety()) { const back = try block.addTyOp(.int_to_float, operand_ty, result); const diff = try block.addBinOp(.sub, operand, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, Value.one)); - const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, Value.negative_one)); + const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 1))); + const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, -1))); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } @@ -21398,7 +21447,7 @@ fn analyzeShuffle( 
expand_mask_values[i] = try mod.intValue(Type.comptime_int, i); } while (i < max_len) : (i += 1) { - expand_mask_values[i] = Value.negative_one; + expand_mask_values[i] = try mod.intValue(Type.comptime_int, -1); } const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values); @@ -24504,7 +24553,7 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { - if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try container_ty.structFieldValueComptime(mod, field_index)) |default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; } } @@ -24815,7 +24864,7 @@ fn tupleFieldValByIndex( const mod = sema.mod; const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); } @@ -24828,7 +24877,7 @@ fn tupleFieldValByIndex( return sema.addConstant(field_ty, field_values[field_index]); } - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(field_ty, default_val); } @@ -25205,7 +25254,7 @@ fn tupleFieldPtr( .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ .field_ty = field_ty, .field_val = default_val, @@ -25256,13 +25305,13 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); // comptime field } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, mod, field_index)); + return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25812,7 +25861,7 @@ fn coerceExtra( if (inst_info.size == .Slice) { assert(dest_info.sentinel == null); if (inst_info.sentinel == null or - !inst_info.sentinel.?.eql(Value.zero, dest_info.pointee_type, sema.mod)) + !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, sema.mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -25879,7 +25928,7 @@ fn coerceExtra( try mod.intValue(Type.usize, dest_info.@"align") else try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), - .len = Value.zero, + .len = try mod.intValue(Type.usize, 0), }); return sema.addConstant(dest_ty, slice_val); } @@ -28234,7 +28283,7 @@ fn beginComptimePtrLoad( const field_ty = field_ptr.container_ty.structFieldType(field_index); deref.pointee = TypedValue{ .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, mod, field_index), + .val = try tv.val.fieldValue(tv.ty, mod, field_index), }; } break :blk deref; @@ -32532,9 +32581,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk try val.copy(decl_arena_allocator); } else blk: { const val 
= if (last_tag_val) |val| - try sema.intAdd(val, Value.one, int_tag_ty) + try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty) else - Value.zero; + try mod.intValue(int_tag_ty, 0); last_tag_val = val; break :blk try val.copy(decl_arena_allocator); @@ -32903,7 +32952,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } @@ -33049,7 +33098,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } if (enum_obj.fields.count() == 1) { if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_obj.values.keys()[0]; } @@ -33066,7 +33115,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { switch (enum_obj.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_obj.values.keys()[0]; }, @@ -33078,14 +33127,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const enum_simple = resolved_ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return Value.zero, + 1 => return try mod.intValue(ty, 0), else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 28212a164c86..828fb610d492 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -61,7 +61,10 @@ pub fn format( ) !void { _ = options; comptime std.debug.assert(fmt.len == 0); - return ctx.tv.print(writer, 3, ctx.mod); + return ctx.tv.print(writer, 3, ctx.mod) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function + else => |e| return e, + }; } /// Prints the Value according to the Type, not according to the Value Tag. 
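The `print` hunk below widens the function's error set with the `||`
operator. A minimal sketch of that device in isolation (hypothetical names,
not patch code):

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    // Callers must handle the union of the writer's own error set and
    // Allocator.Error (i.e. error.OutOfMemory).
    fn render(writer: anytype) (@TypeOf(writer).Error || Allocator.Error)!void {
        try writer.writeAll("example");
    }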
@@ -70,7 +73,7 @@ pub fn print( writer: anytype, level: u8, mod: *Module, -) @TypeOf(writer).Error!void { +) (@TypeOf(writer).Error || Allocator.Error)!void { var val = tv.val; var ty = tv.ty; if (val.isVariable(mod)) @@ -95,7 +98,7 @@ pub fn print( } try print(.{ .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, mod, i), + .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount() > max_aggregate_items) { @@ -112,7 +115,7 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, mod, i); + const elem = try val.fieldValue(ty, mod, i); if (elem.isUndef()) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } @@ -129,7 +132,7 @@ pub fn print( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, mod, i), + .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 95a8350c7d04..ea3814a20e33 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4311,7 +4311,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -6154,7 +6154,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index cc2bc3a61346..967a6dd7538a 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4291,7 +4291,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
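// (Aside, not patch code: the call sites below now unwrap with `try`,
// matching a fallible signature roughly of the form
//     pub fn value(air: Air, ref: Air.Inst.Ref, mod: *Module) !?Value
// reconstructed from usage; the actual declaration lives in src/Air.zig.)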
- if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -6101,7 +6101,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1008d527f6a1..5cf621488e9f 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1743,7 +1743,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); @@ -2551,7 +2551,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 4231222d4b7e..2cb35460c2ac 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1343,7 +1343,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -4575,7 +4575,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { return self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref, mod).?, + .val = (try self.air.value(ref, mod)).?, }); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 327e2c13e043..36b805cf9440 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -789,7 +789,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { assert(!gop.found_existing); const mod = func.bin_file.base.options.module.?; - const val = func.air.value(ref, mod).?; + const val = (try func.air.value(ref, mod)).?; const ty = func.typeOf(ref); if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; @@ -2195,7 +2195,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null; + const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; if (func_val.castTag(.function)) |function| { _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); @@ -3138,7 +3138,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.zero; + const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); @@ -3792,7 +3792,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { errdefer func.gpa.free(values); for (items, 0..) |ref, i| { - const item_val = func.air.value(ref, mod).?; + const item_val = (try func.air.value(ref, mod)).?; const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; @@ -5048,7 +5048,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(result_ty); const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset for (elements, 0..) |elem, elem_index| { - if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; const elem_ty = result_ty.structFieldType(elem_index); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 51c6bc79e648..b208656a41f5 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2768,7 +2768,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const full_ty = try mod.vectorType(.{ .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), - .child = src_ty.childType(mod).ip_index, + .child = elem_ty.ip_index, }); const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); @@ -8107,7 +8107,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (if (func_value.castTag(.function)) |func_payload| func_payload.data.owner_decl else if (func_value.castTag(.decl_ref)) |decl_ref_payload| @@ -11265,7 +11265,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); @@ -11337,7 +11337,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) 
|elem, elem_i| { - if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); @@ -11601,7 +11601,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref, mod).?, + .val = (try self.air.value(ref, mod)).?, })); break :tracking gop.value_ptr; }, @@ -11614,7 +11614,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref, mod).? }); + return self.genTypedValue(.{ .ty = ty, .val = (try self.air.value(ref, mod)).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { diff --git a/src/codegen.zig b/src/codegen.zig index 9c9868892f4c..8bd478bf7cde 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -675,7 +675,7 @@ pub fn generateSymbol( const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) Value.zero else typed_value.val; + const err_val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, .val = err_val, @@ -690,7 +690,7 @@ pub fn generateSymbol( if (error_align > payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.zero else typed_value.val, + .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -722,7 +722,7 @@ pub fn generateSymbol( const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.zero else typed_value.val, + .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -1280,7 +1280,7 @@ pub fn genTypedValue( if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
- const err_val = if (!is_pl) typed_value.val else Value.zero; + const err_val = if (!is_pl) typed_value.val else try mod.intValue(error_type, 0); return genTypedValue(bin_file, src_loc, .{ .ty = error_type, .val = err_val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 9443c2298a72..aaeec055626d 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -287,7 +287,7 @@ pub const Function = struct { if (gop.found_existing) return gop.value_ptr.*; const mod = f.object.dg.module; - const val = f.air.value(ref, mod).?; + const val = (try f.air.value(ref, mod)).?; const ty = f.typeOf(ref); const result: CValue = if (lowersToArray(ty, mod)) result: { @@ -356,7 +356,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; return f.object.dg.renderValue(w, ty, val, location); }, .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location), @@ -369,7 +369,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); return w.writeByte(')'); @@ -383,7 +383,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); return f.writeCValue(w, member, .Other); @@ -397,7 +397,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); try w.writeAll(")->"); @@ -690,7 +690,7 @@ pub const DeclGen = struct { location, ); try writer.print(") + {})", .{ - try dg.fmtIntLiteral(Type.usize, Value.one, .Other), + try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other), }); }, } @@ -1253,7 +1253,7 @@ pub const DeclGen = struct { .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); const error_ty = ty.errorUnionSet(); - const error_val = if (val.errorUnionIsPayload()) Value.zero else val; + const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, error_val, location); @@ -3611,7 +3611,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -4183,7 +4183,7 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = f.air.value(pl_op.operand, mod) orelse break :known; + const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; break :fn_decl switch (callee_val.tag()) { .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, .function => callee_val.castTag(.function).?.data.owner_decl, @@ -4269,7 +4269,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { const 
mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4735,7 +4735,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, condition_ty, f.air.value(item, mod).?, .Other); + try f.object.dg.renderValue(writer, condition_ty, (try f.air.value(item, mod)).?, .Other); try writer.writeByte(':'); } try writer.writeByte(' '); @@ -5069,7 +5069,7 @@ fn airIsNull( // operand is a regular pointer, test `operand !=/== NULL` TypedValue{ .ty = optional_ty, .val = Value.null } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) - TypedValue{ .ty = payload_ty, .val = Value.zero } + TypedValue{ .ty = payload_ty, .val = try mod.intValue(payload_ty, 0) } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf, mod); @@ -5325,7 +5325,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { }, .end => { try f.writeCValue(writer, field_ptr_val, .Other); - try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5378,7 +5378,7 @@ fn fieldPtr( .end => { try writer.writeByte('('); try f.writeCValue(writer, container_ptr_val, .Other); - try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5546,7 +5546,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, operand, .{ .identifier = "error" }) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Initializer); } try writer.writeAll(";\n"); return local; @@ -5673,7 +5673,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { try f.writeCValueDeref(writer, operand); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n "); return operand; @@ -5681,7 +5681,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); try f.writeCValueDeref(writer, operand); try writer.writeAll(".error = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n"); // Then return the payload pointer (only if it is used) @@ -5737,7 +5737,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, local, .{ .identifier = "error" }); try a.assign(f, writer); - try f.object.dg.renderValue(writer, err_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, err_ty, try mod.intValue(err_ty, 0), 
.Other); try a.end(f, writer); } return local; @@ -5768,11 +5768,11 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const else try f.writeCValue(writer, operand, .Other) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeByte(' '); try writer.writeAll(operator); try writer.writeByte(' '); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n"); return local; } @@ -5798,7 +5798,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); - try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); + try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))}); } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); @@ -6022,7 +6022,7 @@ fn airCmpBuiltinCall( try writer.writeByte(')'); if (!ref_ret) try writer.print(" {s} {}", .{ compareOperatorC(operator), - try f.fmtIntLiteral(Type.i32, Value.zero), + try f.fmtIntLiteral(Type.i32, try mod.intValue(Type.i32, 0)), }); try writer.writeAll(";\n"); try v.end(f, inst, writer); @@ -6278,7 +6278,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(mod); - const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6326,7 +6326,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("for ("); try f.writeCValue(writer, index, .Other); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, Type.usize, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, 0), .Initializer); try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); @@ -6677,27 +6677,27 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { - .Or, .Xor, .Add => Value.zero, + .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), .And => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one, + .Bool => try mod.intValue(Type.comptime_int, 1), else => switch (scalar_ty.intInfo(mod).signedness) { .unsigned => try scalar_ty.maxIntScalar(mod), - .signed => Value.negative_one, + .signed => try mod.intValue(scalar_ty, -1), }, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one, + .Bool => try mod.intValue(Type.comptime_int, 1), .Int => try scalar_ty.maxIntScalar(mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.zero, + .Bool => try mod.intValue(scalar_ty, 0), .Int => try scalar_ty.minInt(stack.get(), mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, - .Mul => Value.one, + .Mul => try 
mod.intValue(Type.comptime_int, 1), }, .Initializer); try writer.writeAll(";\n"); @@ -7686,13 +7686,13 @@ const Vectorize = struct { try writer.writeAll("for ("); try f.writeCValue(writer, local, .Other); - try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); + try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))}); try f.writeCValue(writer, local, .Other); try writer.print(" < {d}; ", .{ try f.fmtIntLiteral(Type.usize, len_val), }); try f.writeCValue(writer, local, .Other); - try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); f.object.indent_writer.pushIndent(); break :index .{ .index = local }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 9d8c3edaf5c6..c42719d07cd5 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2854,7 +2854,7 @@ pub const DeclGen = struct { }, .Array => { const elem_ty = t.childType(mod); - assert(elem_ty.onePossibleValue(mod) == null); + if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); @@ -3588,7 +3588,7 @@ pub const DeclGen = struct { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) tv.val else Value.zero; + const err_val = if (!is_pl) tv.val else try mod.intValue(Type.anyerror, 0); return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } @@ -3596,7 +3596,7 @@ pub const DeclGen = struct { const error_align = Type.anyerror.abiAlignment(mod); const llvm_error_value = try dg.lowerValue(.{ .ty = Type.anyerror, - .val = if (is_pl) Value.zero else tv.val, + .val = if (is_pl) try mod.intValue(Type.anyerror, 0) else tv.val, }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, @@ -4476,7 +4476,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const llvm_val = try self.resolveValue(.{ .ty = self.typeOf(inst), - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); gop.value_ptr.* = llvm_val; return llvm_val; @@ -6873,7 +6873,7 @@ pub const FuncGen = struct { const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); - const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); + const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.anyerror, 0) }); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); return operand; @@ -8203,7 +8203,7 @@ pub const FuncGen = struct { const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. 
However, safety makes the difference between using @@ -8494,7 +8494,7 @@ pub const FuncGen = struct { const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(mod); - if (self.air.value(bin_op.rhs, mod)) |elem_val| { + if (try self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -9323,7 +9323,7 @@ pub const FuncGen = struct { var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(mod, i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; @@ -9344,7 +9344,7 @@ pub const FuncGen = struct { } else { var result = llvm_result_ty.getUndef(); for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(mod, i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 32e0c13c376f..3842da5f7bc9 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -232,7 +232,7 @@ pub const DeclGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { const mod = self.module; - if (self.air.value(inst, mod)) |val| { + if (try self.air.value(inst, mod)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { @@ -584,7 +584,7 @@ pub const DeclGen = struct { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by usize for now, // though. 
- try self.addInt(Type.usize, Value.zero); + try self.addInt(Type.usize, Value.zero_usize); // TODO: Add dependency return; }, @@ -803,7 +803,7 @@ pub const DeclGen = struct { .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); const is_pl = val.errorUnionIsPayload(); - const error_val = if (!is_pl) val else Value.zero; + const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); const eu_layout = dg.errorUnionLayout(payload_ty); if (!eu_layout.payload_has_bits) { @@ -2801,7 +2801,7 @@ pub const DeclGen = struct { const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); @@ -3141,7 +3141,7 @@ pub const DeclGen = struct { const label = IdRef{ .id = first_case_label.id + case_i }; for (items) |item| { - const value = self.air.value(item, mod) orelse { + const value = (try self.air.value(item, mod)) orelse { return self.todo("switch on runtime value???", .{}); }; const int_val = switch (cond_ty.zigTypeTag(mod)) { diff --git a/src/type.zig b/src/type.zig index e60d21608556..e6d0af9f46d5 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3377,7 +3377,7 @@ pub const Type = struct { } /// For vectors, returns the element type. Otherwise returns self. - pub fn scalarType(ty: Type, mod: *const Module) Type { + pub fn scalarType(ty: Type, mod: *Module) Type { return switch (ty.zigTypeTag(mod)) { .Vector => ty.childType(mod), else => ty, @@ -3941,13 +3941,13 @@ pub const Type = struct { /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which /// resolves field types rather than asserting they are already resolved. - pub fn onePossibleValue(starting_type: Type, mod: *const Module) ?Value { + pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { var ty = starting_type; if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } @@ -3956,13 +3956,13 @@ pub const Type = struct { .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); - if (array_type.child.toType().onePossibleValue(mod) != null) + if ((try array_type.child.toType().onePossibleValue(mod)) != null) return Value.initTag(.the_only_possible_value); return null; }, .vector_type => |vector_type| { if (vector_type.len == 0) return Value.initTag(.empty_array); - if (vector_type.child.toType().onePossibleValue(mod)) |v| return v; + if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v; return null; }, .opt_type => |child| { @@ -4055,7 +4055,7 @@ pub const Type = struct { assert(s.haveFieldTypes()); for (s.fields.values()) |field| { if (field.is_comptime) continue; - if (field.ty.onePossibleValue(mod) != null) continue; + if ((try field.ty.onePossibleValue(mod)) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -4066,7 +4066,7 @@ pub const Type = struct { for (tuple.values, 0..) 
|val, i| { const is_comptime = val.ip_index != .unreachable_value; if (is_comptime) continue; - if (tuple.types[i].onePossibleValue(mod) != null) continue; + if ((try tuple.types[i].onePossibleValue(mod)) != null) continue; return null; } return Value.initTag(.empty_struct_value); @@ -4089,7 +4089,7 @@ pub const Type = struct { switch (enum_full.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_full.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_full.values.keys()[0]; }, @@ -4100,24 +4100,24 @@ pub const Type = struct { const enum_simple = ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return Value.zero, + 1 => return try mod.intValue(ty, 0), else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (!tag_ty.hasRuntimeBits(mod)) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null; + const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; if (union_obj.fields.count() == 0) return Value.@"unreachable"; const only_field = union_obj.fields.values()[0]; - const val_val = only_field.ty.onePossibleValue(mod) orelse return null; + const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; _ = tag_val; _ = val_val; return Value.initTag(.empty_struct_value); @@ -4128,7 +4128,7 @@ pub const Type = struct { .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); - if (ty.childType(mod).onePossibleValue(mod) != null) + if ((try ty.childType(mod).onePossibleValue(mod)) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -4365,8 +4365,8 @@ pub const Type = struct { /// Asserts that the type is an integer. pub fn minIntScalar(ty: Type, mod: *Module) !Value { const info = ty.intInfo(mod); - if (info.signedness == .unsigned) return Value.zero; - if (info.bits == 0) return Value.negative_one; + if (info.signedness == .unsigned) return mod.intValue(ty, 0); + if (info.bits == 0) return mod.intValue(ty, -1); if (std.math.cast(u6, info.bits - 1)) |shift| { const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); @@ -4392,17 +4392,17 @@ pub const Type = struct { } /// Asserts that the type is an integer. 
- pub fn maxIntScalar(self: Type, mod: *Module) !Value { - const info = self.intInfo(mod); + pub fn maxIntScalar(ty: Type, mod: *Module) !Value { + const info = ty.intInfo(mod); switch (info.bits) { 0 => return switch (info.signedness) { - .signed => Value.negative_one, - .unsigned => Value.zero, + .signed => mod.intValue(ty, -1), + .unsigned => mod.intValue(ty, 0), }, 1 => return switch (info.signedness) { - .signed => Value.zero, - .unsigned => Value.one, + .signed => mod.intValue(ty, 0), + .unsigned => mod.intValue(ty, 0), }, else => {}, } @@ -4662,7 +4662,7 @@ pub const Type = struct { } } - pub fn structFieldValueComptime(ty: Type, mod: *const Module, index: usize) ?Value { + pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; diff --git a/src/value.zig b/src/value.zig index eced9ba34524..8268d1dde160 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1022,7 +1022,7 @@ pub const Value = struct { if (opt_val) |some| { return some.writeToMemory(child, mod, buffer); } else { - return writeToMemory(Value.zero, Type.usize, mod, buffer); + return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer); } }, else => return error.Unimplemented, @@ -1124,7 +1124,7 @@ pub const Value = struct { .Packed => { const field_index = ty.unionTagFieldIndex(val.unionTag(), mod); const field_type = ty.unionFields().values()[field_index.?].ty; - const field_val = val.fieldValue(field_type, mod, field_index.?); + const field_val = try val.fieldValue(field_type, mod, field_index.?); return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); }, @@ -1141,7 +1141,7 @@ pub const Value = struct { if (opt_val) |some| { return some.writeToPackedMemory(child, mod, buffer, bit_offset); } else { - return writeToPackedMemory(Value.zero, Type.usize, mod, buffer, bit_offset); + return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset); } }, else => @panic("TODO implement writeToPackedMemory for more types"), @@ -1173,7 +1173,7 @@ pub const Value = struct { const int_info = ty.intInfo(mod); const bits = int_info.bits; const byte_count = (bits + 7) / 8; - if (bits == 0 or buffer.len == 0) return Value.zero; + if (bits == 0 or buffer.len == 0) return mod.intValue(ty, 0); if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 .signed => { @@ -1290,12 +1290,12 @@ pub const Value = struct { } }, .Int, .Enum => { - if (buffer.len == 0) return Value.zero; + if (buffer.len == 0) return mod.intValue(ty, 0); const int_info = ty.intInfo(mod); const abi_size = @intCast(usize, ty.abiSize(mod)); const bits = int_info.bits; - if (bits == 0) return Value.zero; + if (bits == 0) return mod.intValue(ty, 0); if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)), .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)), @@ -2091,11 +2091,11 @@ pub const Value = struct { // .the_one_possible_value, // .aggregate, // Note that we already checked above for matching tags, e.g. both .aggregate. - return ty.onePossibleValue(mod) != null; + return (try ty.onePossibleValue(mod)) != null; }, .Union => { // Here we have to check for value equality, as-if `a` has been coerced to `ty`. 
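The readFromPackedMemory fast path above leans on std.mem.readVarPackedInt to decode integers of 64 bits or fewer straight out of the byte buffer. A minimal standalone sketch of that decoding, with an illustrative bit layout chosen for the test:

const std = @import("std");

test "decode a 12-bit field at a bit offset" {
    // Two bytes holding 0xABC starting at bit 4, in little-endian bit order:
    // (0xABC0 >> 4) & 0xFFF == 0xABC.
    const buffer = [_]u8{ 0xC0, 0xAB };
    const v = std.mem.readVarPackedInt(u64, &buffer, 4, 12, .Little, .unsigned);
    try std.testing.expectEqual(@as(u64, 0xABC), v);
}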
- if (ty.onePossibleValue(mod) != null) { + if ((try ty.onePossibleValue(mod)) != null) { return true; } if (a_ty.castTag(.anon_struct)) |payload| { @@ -2604,7 +2604,7 @@ pub const Value = struct { if (data.container_ptr.pointerDecl()) |decl_index| { const container_decl = mod.declPtr(decl_index); const field_type = data.container_ty.structFieldType(data.field_index); - const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); + const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index); return field_val.elemValue(mod, index); } else unreachable; }, @@ -2723,7 +2723,7 @@ pub const Value = struct { }; } - pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value { + pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value { switch (val.ip_index) { .undef => return Value.undef, .none => switch (val.tag()) { @@ -2737,14 +2737,14 @@ pub const Value = struct { return payload.val; }, - .the_only_possible_value => return ty.onePossibleValue(mod).?, + .the_only_possible_value => return (try ty.onePossibleValue(mod)).?, .empty_struct_value => { if (ty.isSimpleTupleOrAnonStruct()) { const tuple = ty.tupleFields(); return tuple.values[index]; } - if (ty.structFieldValueComptime(mod, index)) |some| { + if (try ty.structFieldValueComptime(mod, index)) |some| { return some; } unreachable; @@ -2968,7 +2968,7 @@ pub const Value = struct { switch (val.ip_index) { .undef => return val, .none => switch (val.tag()) { - .the_only_possible_value => return Value.zero, // for i0, u0 + .the_only_possible_value => return Value.float_zero, // for i0, u0 .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -3402,7 +3402,7 @@ pub const Value = struct { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - const all_ones = if (ty.isSignedInt(mod)) Value.negative_one else try ty.maxIntScalar(mod); + const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod); return bitwiseXor(anded, all_ones, ty, arena, mod); } @@ -3803,7 +3803,7 @@ pub const Value = struct { bits: u16, mod: *Module, ) !Value { - if (bits == 0) return Value.zero; + if (bits == 0) return mod.intValue(ty, 0); var val_space: Value.BigIntSpace = undefined; const val_bigint = val.toBigInt(&val_space, mod); @@ -4011,9 +4011,9 @@ pub const Value = struct { // The shift is enough to remove all the bits from the number, which means the // result is 0 or -1 depending on the sign. if (lhs_bigint.positive) { - return Value.zero; + return mod.intValue(ty, 0); } else { - return Value.negative_one; + return mod.intValue(ty, -1); } } @@ -5151,10 +5151,9 @@ pub const Value = struct { pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; - pub const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; - pub const one: Value = .{ .ip_index = .one, .legacy = undefined }; - pub const negative_one: Value = .{ .ip_index = .negative_one, .legacy = undefined }; + pub const zero_usize: Value = .{ .ip_index = .zero_usize, .legacy = undefined }; pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; + pub const float_zero: Value = .{ .ip_index = .zero, .legacy = undefined }; // TODO: replace this! 
pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined }; @@ -5169,7 +5168,9 @@ pub const Value = struct { } pub fn boolToInt(x: bool) Value { - return if (x) Value.one else Value.zero; + const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; + const one: Value = .{ .ip_index = .one, .legacy = undefined }; + return if (x) one else zero; } pub const RuntimeIndex = enum(u32) { From 8699cdc3dfcf3a3a6f09a64ea9c67be2459e1240 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 15:06:58 -0700 Subject: [PATCH 045/205] InternPool: fix UAF in getCoercedInt and add u16 int value encoding --- src/InternPool.zig | 83 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 77 insertions(+), 6 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index f6d594cbbbf3..c60f58e20757 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -721,6 +721,9 @@ pub const Tag = enum(u8) { /// only an enum tag, but will be presented via the API with a different Key. /// data is SimpleInternal enum value. simple_internal, + /// Type: u16 + /// data is integer value + int_u16, /// Type: u32 /// data is integer value int_u32, @@ -1053,6 +1056,10 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), .simple_internal => @panic("TODO"), + .int_u16 => .{ .int = .{ + .ty = .u16_type, + .storage = .{ .u64 = data }, + } }, .int_u32 => .{ .int = .{ .ty = .u32_type, .storage = .{ .u64 = data }, @@ -1219,6 +1226,26 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .int => |int| b: { switch (int.ty) { .none => unreachable, + .u16_type => switch (int.storage) { + .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u16, + .data = casted, + }); + break :b; + } else |_| {} + }, + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u16, + .data = casted, + }); + break :b; + } + }, + }, .u32_type => switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { @@ -1252,7 +1279,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { inline .u64, .i64 => |x| { if (std.math.cast(i32, x)) |casted| { ip.items.appendAssumeCapacity(.{ - .tag = .int_u32, + .tag = .int_i32, .data = @bitCast(u32, casted), }); break :b; @@ -1466,6 +1493,7 @@ fn limbData(ip: InternPool, comptime T: type, index: usize) T { return result; } +/// This function returns the Limb slice that is trailing data after a payload. 
fn limbSlice(ip: InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { const field_count = @typeInfo(S).Struct.fields.len; switch (@sizeOf(Limb)) { @@ -1481,6 +1509,33 @@ fn limbSlice(ip: InternPool, comptime S: type, limb_index: u32, len: u32) []cons } } +const LimbsAsIndexes = struct { + start: u32, + len: u32, +}; + +fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes { + const host_slice = switch (@sizeOf(Limb)) { + @sizeOf(u32) => ip.extra.items, + @sizeOf(u64) => ip.limbs.items, + else => @compileError("unsupported host"), + }; + // TODO: https://github.com/ziglang/zig/issues/1738 + return .{ + .start = @intCast(u32, @divExact(@ptrToInt(limbs.ptr) - @ptrToInt(host_slice.ptr), @sizeOf(Limb))), + .len = @intCast(u32, limbs.len), + }; +} + +/// This function converts Limb array indexes to a primitive slice type. +fn limbsIndexToSlice(ip: InternPool, limbs: LimbsAsIndexes) []const Limb { + return switch (@sizeOf(Limb)) { + @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], + @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], + else => @compileError("unsupported host"), + }; +} + test "basic usage" { const gpa = std.testing.allocator; @@ -1544,15 +1599,30 @@ pub fn getCoercedInt(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) // Here we pre-reserve the limbs to ensure that the logic in `addInt` will // not use an invalidated limbs pointer. switch (key.int.storage) { - .u64, .i64 => {}, + .u64 => |x| return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .u64 = x }, + } }), + .i64 => |x| return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .i64 = x }, + } }), + .big_int => |big_int| { + const positive = big_int.positive; + const limbs = ip.limbsSliceToIndex(big_int.limbs); + // This line invalidates the limbs slice, but the indexes computed in the + // previous line are still correct. try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); + return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .big_int = .{ + .limbs = ip.limbsIndexToSlice(limbs), + .positive = positive, + } }, + } }); }, } - return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = key.int.storage, - } }); } pub fn dump(ip: InternPool) void { @@ -1608,6 +1678,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .simple_type => 0, .simple_value => 0, .simple_internal => 0, + .int_u16 => 0, .int_u32 => 0, .int_i32 => 0, .int_usize => 0, From a5fb16959423005de999fb541d5d5e9aebb8e09e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 15:07:28 -0700 Subject: [PATCH 046/205] stage2: bug fixes related to Type/Value/InternPool --- src/Sema.zig | 64 ++++++++++++++++++------------------- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen/c.zig | 8 ++--- src/codegen/llvm.zig | 28 +++++++++------- src/type.zig | 48 +++++++++++++++++----------- src/value.zig | 10 +++++- 6 files changed, 92 insertions(+), 68 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 9b1da7498205..ca6f28017bce 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1746,8 +1746,9 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i); // The last section of indexes refers to the map of ZIR => AIR. 
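The getCoercedInt fix above is an instance of a general pattern: a slice into a growable array must not be held across an append that may reallocate, so it is converted to indexes first and rehydrated afterwards, as limbsSliceToIndex/limbsIndexToSlice do. A simplified sketch of the same pattern, assuming `slice` aliases `list.items`:

const std = @import("std");

fn sumAfterGrow(gpa: std.mem.Allocator, list: *std.ArrayListUnmanaged(u32), slice: []const u32) !u32 {
    // Convert the aliasing slice to indexes while its pointer is still valid.
    const start = @divExact(@ptrToInt(slice.ptr) - @ptrToInt(list.items.ptr), @sizeOf(u32));
    const len = slice.len;
    // This call may reallocate and invalidate `slice.ptr`...
    try list.ensureUnusedCapacity(gpa, 100);
    // ...but the indexes stay correct against the (possibly moved) buffer.
    var sum: u32 = 0;
    for (list.items[start..][0..len]) |x| sum += x;
    return sum;
}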
const inst = sema.inst_map.get(i - InternPool.static_len).?; + if (inst == .generic_poison) return error.GenericPoison; const ty = sema.typeOf(inst); - if (ty.isGenericPoison()) return error.GenericPoison; + assert(!ty.isGenericPoison()); return inst; } @@ -2000,7 +2001,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( .constant => { const ty_pl = air_datas[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; - if (val.tag() == .runtime_value) make_runtime.* = true; + if (val.isRuntimeValue()) make_runtime.* = true; if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, @@ -9688,7 +9689,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod); + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty); const dest_max_val = if (is_vector) try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) else @@ -10831,7 +10832,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer arena.deinit(); const min_int = try operand_ty.minInt(arena.allocator(), mod); - const max_int = try operand_ty.maxIntScalar(mod); + const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -11683,7 +11684,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; const min = try ty.minInt(sema.arena, mod); - const max = try ty.maxIntScalar(mod); + const max = try ty.maxIntScalar(mod, Type.comptime_int); return RangeSetUnhandledIterator{ .sema = sema, @@ -12294,7 +12295,7 @@ fn zirShl( { const max_int = try sema.addConstant( lhs_ty, - try lhs_ty.maxInt(sema.arena, mod), + try lhs_ty.maxInt(sema.arena, mod, lhs_ty), ); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); @@ -16503,7 +16504,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -22202,8 +22203,8 @@ fn analyzeMinMax( else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, mod), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, mod), + .min => try comptime_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), else => unreachable, }; @@ -27931,33 +27932,32 @@ fn beginComptimePtrMutation( switch (parent.pointee) { .direct => |val_ptr| { const payload_ty = parent.ty.errorUnionPayload(); - switch (val_ptr.tag()) { - else => { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. 
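The exhaustiveness check in zirSwitchBlock above asks whether the collected case ranges span the operand type's [minInt, maxInt]. A simplified sketch of that test, assuming the ranges are already sorted and non-overlapping (an invariant the real RangeSet maintains):

const Range = struct { first: i64, last: i64 };

fn spans(ranges: []const Range, min: i64, max: i64) bool {
    if (ranges.len == 0) return false;
    if (ranges[0].first != min) return false;
    var prev_last = ranges[0].last;
    for (ranges[1..]) |r| {
        // Any gap between consecutive ranges leaves a value unhandled.
        if (r.first != prev_last + 1) return false;
        prev_last = r.last;
    }
    return prev_last == max;
}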
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the error union from `undef` to `opt_payload`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.undef, - }; + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .eu_payload }, + .data = Value.undef, + }; - val_ptr.* = Value.initPayload(&payload.base); + val_ptr.* = Value.initPayload(&payload.base); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - .eu_payload => return ComptimePtrMutationKit{ + return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .pointee = .{ .direct = &payload.data }, .ty = payload_ty, - }, + }; } }, .bad_decl_ty, .bad_ptr_ty => return parent, @@ -33225,7 +33225,7 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { const gpa = sema.gpa; - if (val.ip_index != .none) { + if (val.ip_index != .none and val.ip_index != .null_value) { if (@enumToInt(val.ip_index) < Air.ref_start_index) return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index)); try sema.air_instructions.append(gpa, .{ diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b208656a41f5..c5e3410947fa 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4915,7 +4915,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const sign_val = switch (tag) { .neg => try vec_ty.minInt(stack.get(), mod), - .fabs => try vec_ty.maxInt(stack.get(), mod), + .fabs => try vec_ty.maxInt(stack.get(), mod, vec_ty), else => unreachable, }; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index aaeec055626d..b688aada34d0 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -3542,7 +3542,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try v.elem(f, writer); } else switch (dest_int_info.signedness) { .unsigned => { - const mask_val = try inst_scalar_ty.maxIntScalar(mod); + const mask_val = try inst_scalar_ty.maxIntScalar(mod, scalar_ty); try writer.writeAll("zig_and_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); try writer.writeByte('('); @@ -6681,13 +6681,13 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .And => switch (scalar_ty.zigTypeTag(mod)) { .Bool => try mod.intValue(Type.comptime_int, 1), else => switch (scalar_ty.intInfo(mod).signedness) { - .unsigned => try scalar_ty.maxIntScalar(mod), + .unsigned => try scalar_ty.maxIntScalar(mod, scalar_ty), .signed => try mod.intValue(scalar_ty, -1), }, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => try mod.intValue(Type.comptime_int, 1), - .Int => try scalar_ty.maxIntScalar(mod), + .Bool => Value.one_comptime_int, + .Int => try scalar_ty.maxIntScalar(mod, scalar_ty), .Float => try 
Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c42719d07cd5..23340b5d349e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3570,15 +3570,21 @@ pub const DeclGen = struct { }, .ErrorSet => { const llvm_ty = try dg.lowerType(Type.anyerror); - switch (tv.val.tag()) { - .@"error" => { - const err_name = tv.val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - return llvm_ty.constInt(kv.value, .False); + switch (tv.val.ip_index) { + .none => switch (tv.val.tag()) { + .@"error" => { + const err_name = tv.val.castTag(.@"error").?.data.name; + const kv = try dg.module.getErrorValue(err_name); + return llvm_ty.constInt(kv.value, .False); + }, + else => { + // In this case we are rendering an error union which has a 0 bits payload. + return llvm_ty.constNull(); + }, }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. - return llvm_ty.constNull(); + else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .int => |int| return llvm_ty.constInt(int.storage.u64, .False), + else => unreachable, }, } }, @@ -3588,7 +3594,7 @@ pub const DeclGen = struct { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) tv.val else try mod.intValue(Type.anyerror, 0); + const err_val = if (!is_pl) tv.val else try mod.intValue(Type.err_int, 0); return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } @@ -3596,7 +3602,7 @@ pub const DeclGen = struct { const error_align = Type.anyerror.abiAlignment(mod); const llvm_error_value = try dg.lowerValue(.{ .ty = Type.anyerror, - .val = if (is_pl) try mod.intValue(Type.anyerror, 0) else tv.val, + .val = if (is_pl) try mod.intValue(Type.err_int, 0) else tv.val, }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, @@ -6873,7 +6879,7 @@ pub const FuncGen = struct { const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); - const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.anyerror, 0) }); + const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) }); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); return operand; diff --git a/src/type.zig b/src/type.zig index e6d0af9f46d5..68ac50703726 100644 --- a/src/type.zig +++ b/src/type.zig @@ -4382,8 +4382,9 @@ pub const Type = struct { } // Works for vectors and vectors of integers. - pub fn maxInt(ty: Type, arena: Allocator, mod: *Module) !Value { - const scalar = try maxIntScalar(ty.scalarType(mod), mod); + /// The returned Value will have type dest_ty. + pub fn maxInt(ty: Type, arena: Allocator, mod: *Module, dest_ty: Type) !Value { + const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty); if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { @@ -4391,18 +4392,18 @@ pub const Type = struct { } } - /// Asserts that the type is an integer. - pub fn maxIntScalar(ty: Type, mod: *Module) !Value { + /// The returned Value will have type dest_ty. 
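The Type.err_int values above rely on the convention that error code 0 means "no error" while every named error owns a distinct nonzero integer. A sketch of that numbering scheme with an illustrative table type (not the compiler's real error table; keys are assumed to outlive the table):

const std = @import("std");

const ErrorTable = struct {
    names: std.StringArrayHashMapUnmanaged(void) = .{},

    /// Returns the nonzero code for `name`, assigning the next one on first use.
    fn intern(t: *ErrorTable, gpa: std.mem.Allocator, name: []const u8) !u32 {
        const gop = try t.names.getOrPut(gpa, name);
        // Map index 0 becomes code 1; code 0 stays reserved for "no error".
        return @intCast(u32, gop.index + 1);
    }
};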
+ pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { const info = ty.intInfo(mod); switch (info.bits) { 0 => return switch (info.signedness) { - .signed => mod.intValue(ty, -1), - .unsigned => mod.intValue(ty, 0), + .signed => try mod.intValue(dest_ty, -1), + .unsigned => try mod.intValue(dest_ty, 0), }, 1 => return switch (info.signedness) { - .signed => mod.intValue(ty, 0), - .unsigned => mod.intValue(ty, 0), + .signed => try mod.intValue(dest_ty, 0), + .unsigned => try mod.intValue(dest_ty, 1), }, else => {}, } @@ -4410,11 +4411,11 @@ pub const Type = struct { if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) { .signed => { const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift); - return mod.intValue(Type.comptime_int, n); + return mod.intValue(dest_ty, n); }, .unsigned => { const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift); - return mod.intValue(Type.comptime_int, n); + return mod.intValue(dest_ty, n); }, }; @@ -4423,7 +4424,7 @@ pub const Type = struct { try res.setTwosCompIntLimit(.max, info.signedness, info.bits); - return mod.intValue_big(Type.comptime_int, res.toConst()); + return mod.intValue_big(dest_ty, res.toConst()); } /// Asserts the type is an enum or a union. @@ -5068,6 +5069,7 @@ pub const Type = struct { pub fn isSimpleTuple(ty: Type) bool { return switch (ty.ip_index) { + .empty_struct => true, .none => switch (ty.tag()) { .tuple, .empty_struct_literal => true, else => false, @@ -5077,21 +5079,29 @@ pub const Type = struct { } pub fn isSimpleTupleOrAnonStruct(ty: Type) bool { - return switch (ty.tag()) { - .tuple, .empty_struct_literal, .anon_struct => true, + return switch (ty.ip_index) { + .empty_struct => true, + .none => switch (ty.tag()) { + .tuple, .empty_struct_literal, .anon_struct => true, + else => false, + }, else => false, }; } // Only allowed for simple tuple types pub fn tupleFields(ty: Type) Payload.Tuple.Data { - return switch (ty.tag()) { - .tuple => ty.castTag(.tuple).?.data, - .anon_struct => .{ - .types = ty.castTag(.anon_struct).?.data.types, - .values = ty.castTag(.anon_struct).?.data.values, + return switch (ty.ip_index) { + .empty_struct => .{ .types = &.{}, .values = &.{} }, + .none => switch (ty.tag()) { + .tuple => ty.castTag(.tuple).?.data, + .anon_struct => .{ + .types = ty.castTag(.anon_struct).?.data.types, + .values = ty.castTag(.anon_struct).?.data.values, + }, + .empty_struct_literal => .{ .types = &.{}, .values = &.{} }, + else => unreachable, }, - .empty_struct_literal => .{ .types = &.{}, .values = &.{} }, else => unreachable, }; } diff --git a/src/value.zig b/src/value.zig index 8268d1dde160..49ca651a799e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2625,6 +2625,10 @@ pub const Value = struct { } } + pub fn isRuntimeValue(val: Value) bool { + return val.ip_index == .none and val.tag() == .runtime_value; + } + pub fn tagIsVariable(val: Value) bool { return val.ip_index == .none and val.tag() == .variable; } @@ -3402,7 +3406,7 @@ pub const Value = struct { if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod); + const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty); return bitwiseXor(anded, all_ones, ty, arena, mod); } @@ -5152,6 +5156,10 @@ pub const Value = struct { pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; pub const zero_usize: Value = .{ .ip_index = 
.zero_usize, .legacy = undefined }; + pub const zero_u8: Value = .{ .ip_index = .zero_u8, .legacy = undefined }; + pub const zero_comptime_int: Value = .{ .ip_index = .zero, .legacy = undefined }; + pub const one_comptime_int: Value = .{ .ip_index = .one, .legacy = undefined }; + pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one, .legacy = undefined }; pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; pub const float_zero: Value = .{ .ip_index = .zero, .legacy = undefined }; // TODO: replace this! pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; From 4d88f825bc5eb14aa00446f046ab4714a4fdce70 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 15:38:31 -0700 Subject: [PATCH 047/205] stage2: implement intTagType logic This commit changes a lot of `*const Module` to `*Module` to make it work, since accessing the integer tag type of an enum might need to mutate the InternPool by adding a new integer type into it. An alternate strategy would be to pre-heat the InternPool with the integer tag type when creating an enum type, which would make it so that intTagType could accept a const Module instead of a mutable one, asserting that the InternPool already had the integer tag type. --- src/Module.zig | 29 ++++++------- src/Sema.zig | 14 +++---- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/aarch64/abi.zig | 6 +-- src/arch/arm/CodeGen.zig | 2 +- src/arch/arm/abi.zig | 4 +- src/arch/riscv64/abi.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 16 ++++---- src/arch/wasm/abi.zig | 4 +- src/arch/x86_64/CodeGen.zig | 4 +- src/arch/x86_64/abi.zig | 4 +- src/codegen.zig | 8 ++-- src/codegen/c.zig | 8 ++-- src/codegen/c/type.zig | 8 ++-- src/codegen/llvm.zig | 30 +++++++------- src/codegen/spirv.zig | 6 +-- src/type.zig | 80 +++++++++++++++++------------------- src/value.zig | 44 ++++++++++---------- 19 files changed, 136 insertions(+), 137 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 3f5dc8039e10..ef38e6ff0623 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -944,7 +944,7 @@ pub const Decl = struct { }; } - pub fn getAlignment(decl: Decl, mod: *const Module) u32 { + pub fn getAlignment(decl: Decl, mod: *Module) u32 { assert(decl.has_tv); if (decl.@"align" != 0) { // Explicit alignment. @@ -1053,7 +1053,7 @@ pub const Struct = struct { /// Returns the field alignment. If the struct is packed, returns 0. pub fn alignment( field: Field, - mod: *const Module, + mod: *Module, layout: std.builtin.Type.ContainerLayout, ) u32 { if (field.abi_align != 0) { @@ -1076,7 +1076,7 @@ pub const Struct = struct { } } - pub fn alignmentExtern(field: Field, mod: *const Module) u32 { + pub fn alignmentExtern(field: Field, mod: *Module) u32 { // This logic is duplicated in Type.abiAlignmentAdvanced. 
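To make the commit message above concrete: computing an enum's integer tag type can require interning an integer type that does not exist yet, and interning is a get-or-create operation on a mutable pool. A sketch of the shape of the problem, with illustrative types rather than the real InternPool:

const std = @import("std");

const Index = u32;
const IntTypeKey = struct { bits: u16 };

const Pool = struct {
    map: std.AutoArrayHashMapUnmanaged(IntTypeKey, void) = .{},

    // Get-or-create mutates the pool, which is why `*const Pool` cannot work
    // here unless the tag type was pre-interned when the enum was created.
    fn getIntType(pool: *Pool, gpa: std.mem.Allocator, bits: u16) !Index {
        const gop = try pool.map.getOrPut(gpa, .{ .bits = bits });
        return @intCast(Index, gop.index);
    }
};

fn autoTagBits(field_count: u32) u16 {
    // An auto-numbered enum with N fields needs ceil(log2(N)) tag bits.
    if (field_count <= 1) return 0;
    return @intCast(u16, std.math.log2_int_ceil(u32, field_count));
}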
const ty_abi_align = field.ty.abiAlignment(mod); @@ -1157,7 +1157,7 @@ pub const Struct = struct { }; } - pub fn packedFieldBitOffset(s: Struct, mod: *const Module, index: usize) u16 { + pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 { assert(s.layout == .Packed); assert(s.haveLayout()); var bit_sum: u64 = 0; @@ -1171,7 +1171,7 @@ pub const Struct = struct { } pub const RuntimeFieldIterator = struct { - module: *const Module, + module: *Module, struct_obj: *const Struct, index: u32 = 0, @@ -1201,7 +1201,7 @@ pub const Struct = struct { } }; - pub fn runtimeFieldIterator(s: *const Struct, module: *const Module) RuntimeFieldIterator { + pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator { return .{ .struct_obj = s, .module = module, @@ -1353,7 +1353,7 @@ pub const Union = struct { /// Returns the field alignment, assuming the union is not packed. /// Keep implementation in sync with `Sema.unionFieldAlignment`. /// Prefer to call that function instead of this one during Sema. - pub fn normalAlignment(field: Field, mod: *const Module) u32 { + pub fn normalAlignment(field: Field, mod: *Module) u32 { if (field.abi_align == 0) { return field.ty.abiAlignment(mod); } else { @@ -1413,7 +1413,7 @@ pub const Union = struct { }; } - pub fn hasAllZeroBitFieldTypes(u: Union, mod: *const Module) bool { + pub fn hasAllZeroBitFieldTypes(u: Union, mod: *Module) bool { assert(u.haveFieldTypes()); for (u.fields.values()) |field| { if (field.ty.hasRuntimeBits(mod)) return false; @@ -1421,7 +1421,7 @@ pub const Union = struct { return true; } - pub fn mostAlignedField(u: Union, mod: *const Module) u32 { + pub fn mostAlignedField(u: Union, mod: *Module) u32 { assert(u.haveFieldTypes()); var most_alignment: u32 = 0; var most_index: usize = undefined; @@ -1438,7 +1438,7 @@ pub const Union = struct { } /// Returns 0 if the union is represented with 0 bits at runtime. - pub fn abiAlignment(u: Union, mod: *const Module, have_tag: bool) u32 { + pub fn abiAlignment(u: Union, mod: *Module, have_tag: bool) u32 { var max_align: u32 = 0; if (have_tag) max_align = u.tag_ty.abiAlignment(mod); for (u.fields.values()) |field| { @@ -1450,7 +1450,7 @@ pub const Union = struct { return max_align; } - pub fn abiSize(u: Union, mod: *const Module, have_tag: bool) u64 { + pub fn abiSize(u: Union, mod: *Module, have_tag: bool) u64 { return u.getLayout(mod, have_tag).abi_size; } @@ -1481,7 +1481,7 @@ pub const Union = struct { }; } - pub fn getLayout(u: Union, mod: *const Module, have_tag: bool) Layout { + pub fn getLayout(u: Union, mod: *Module, have_tag: bool) Layout { assert(u.haveLayout()); var most_aligned_field: u32 = undefined; var most_aligned_field_size: u64 = undefined; @@ -6988,6 +6988,7 @@ pub const AtomicPtrAlignmentError = error{ FloatTooBig, IntTooBig, BadType, + OutOfMemory, }; pub const AtomicPtrAlignmentDiagnostics = struct { @@ -7001,7 +7002,7 @@ pub const AtomicPtrAlignmentDiagnostics = struct { // TODO this function does not take into account CPU features, which can affect // this value. Audit this! 
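The Union.abiAlignment query above reduces to a maximum over the tag (when present) and every field with runtime bits. A standalone sketch of that reduction, taking precomputed alignments instead of types:

fn unionAbiAlignment(field_aligns: []const u32, tag_align: u32, have_tag: bool) u32 {
    var max_align: u32 = 0;
    if (have_tag) max_align = tag_align;
    for (field_aligns) |field_align| {
        // Zero-bit fields report alignment 0 here and so never win the max.
        max_align = @max(max_align, field_align);
    }
    return max_align;
}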
pub fn atomicPtrAlignment( - mod: *const Module, + mod: *Module, ty: Type, diags: *AtomicPtrAlignmentDiagnostics, ) AtomicPtrAlignmentError!u32 { @@ -7080,7 +7081,7 @@ pub fn atomicPtrAlignment( const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, - .Enum => ty.intTagType(), + .Enum => try ty.intTagType(mod), .Float => { const bit_count = ty.floatBits(target); if (bit_count > max_atomic_bits) { diff --git a/src/Sema.zig b/src/Sema.zig index ca6f28017bce..872570493788 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8249,7 +8249,6 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; - const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -8278,7 +8277,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; const enum_tag_ty = sema.typeOf(enum_tag); - const int_tag_ty = try enum_tag_ty.intTagType().copy(arena); + const int_tag_ty = try enum_tag_ty.intTagType(mod); if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { return sema.addConstant(int_tag_ty, opv); @@ -8310,7 +8309,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (try sema.resolveMaybeUndefVal(operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum()) { - const int_tag_ty = dest_ty.intTagType(); + const int_tag_ty = try dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { return sema.addConstant(dest_ty, int_val); } @@ -16268,7 +16267,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Enum => { // TODO: look into memoizing this result. 
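zirEnumToInt above implements the language-level rule that @enumToInt produces a value of the enum's integer tag type. A small test of that behavior as spelled at the time of this series:

const std = @import("std");

const Mode = enum(u8) { fast = 1, small = 2 };

test "@enumToInt uses the integer tag type" {
    const m: Mode = .small;
    const n = @enumToInt(m); // n has type u8, the tag type of Mode
    try std.testing.expectEqual(@as(u8, 2), n);
    try std.testing.expectEqual(Mode.small, @intToEnum(Mode, n));
}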
- const int_tag_ty = try ty.intTagType().copy(sema.arena); + const int_tag_ty = try ty.intTagType(mod); const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum()); @@ -20354,7 +20353,7 @@ fn zirBitCount( block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, - comptime comptimeOp: fn (val: Value, ty: Type, mod: *const Module) u64, + comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64, ) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; @@ -20755,6 +20754,7 @@ fn checkAtomicPtrOperand( const mod = sema.mod; var diag: Module.AtomicPtrAlignmentDiagnostics = .{}; const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.FloatTooBig => return sema.fail( block, elem_ty_src, @@ -23462,7 +23462,7 @@ fn validateExternType( return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention()); }, .Enum => { - return sema.validateExternType(ty.intTagType(), position); + return sema.validateExternType(try ty.intTagType(mod), position); }, .Struct, .Union => switch (ty.containerLayout()) { .Extern => return true, @@ -23540,7 +23540,7 @@ fn explainWhyTypeIsNotExtern( } }, .Enum => { - const tag_ty = ty.intTagType(); + const tag_ty = try ty.intTagType(mod); try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index ea3814a20e33..970d59a25f53 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4533,7 +4533,7 @@ fn cmp( } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => lhs_ty.intTagType(), + .Enum => try lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index 821afd27aef3..1d042b632a67 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -15,7 +15,7 @@ pub const Class = union(enum) { }; /// For `float_array` the second element will be the number of floats.
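validateExternType above accepts an enum exactly when its integer tag type is itself extern-compatible. At the language level that is the difference between these two declarations (a sketch; `report` is a hypothetical external function):

// Accepted: the explicit tag type c_int is extern compatible.
const Status = enum(c_int) { ok = 0, fail = 1 };
extern fn report(status: Status) void;

// A plain `enum { ok, fail }` parameter would be rejected by the check above,
// with the error note pointing at its inferred integer tag type (here u1).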
-pub fn classifyType(ty: Type, mod: *const Module) Class { +pub fn classifyType(ty: Type, mod: *Module) Class { std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); var maybe_float_bits: ?u16 = null; @@ -74,7 +74,7 @@ pub fn classifyType(ty: Type, mod: *const Module) Class { } const sret_float_count = 4; -fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 { +fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 { const target = mod.getTarget(); const invalid = std.math.maxInt(u8); switch (ty.zigTypeTag(mod)) { @@ -115,7 +115,7 @@ fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 { } } -pub fn getFloatArrayType(ty: Type, mod: *const Module) ?Type { +pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type { switch (ty.zigTypeTag(mod)) { .Union => { const fields = ty.unionFields(); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 967a6dd7538a..50f6d76c55a7 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4480,7 +4480,7 @@ fn cmp( } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => lhs_ty.intTagType(), + .Enum => try lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index eee4b41eefab..79ffadf831e6 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -24,7 +24,7 @@ pub const Class = union(enum) { pub const Context = enum { ret, arg }; -pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class { +pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { assert(ty.hasRuntimeBitsIgnoreComptime(mod)); var maybe_float_bits: ?u16 = null; @@ -116,7 +116,7 @@ pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class { } const byval_float_count = 4; -fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u32 { +fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 { const target = mod.getTarget(); const invalid = std.math.maxInt(u32); switch (ty.zigTypeTag(mod)) { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index ac0d8d3e32d1..28a69d913611 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -7,7 +7,7 @@ const Module = @import("../../Module.zig"); pub const Class = enum { memory, byval, integer, double_integer }; -pub fn classifyType(ty: Type, mod: *const Module) Class { +pub fn classifyType(ty: Type, mod: *Module) Class { const target = mod.getTarget(); std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 2cb35460c2ac..0490db615b90 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1436,7 +1436,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. 
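The aarch64 classifyType/countFloats pair above detects homogeneous float aggregates: every float member must share one bit width, and too many members disqualify the aggregate. A simplified sketch over precollected member widths:

const std = @import("std");

fn countFloats(member_bits: []const u16, maybe_float_bits: *?u16) u8 {
    const invalid = std.math.maxInt(u8);
    var count: u8 = 0;
    for (member_bits) |bits| {
        if (maybe_float_bits.*) |prev| {
            // Mixed widths (say f32 next to f64) rule out the float-array class.
            if (prev != bits) return invalid;
        } else maybe_float_bits.* = bits;
        if (count == invalid) return invalid;
        count += 1;
    }
    return count;
}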
- .Enum => lhs_ty.intTagType(), + .Enum => try lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 36b805cf9440..237a55984ed0 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1393,7 +1393,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *const Module) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Module) bool { switch (cc) { .Unspecified, .Inline => return isByRef(return_type, mod), .C => { @@ -1713,7 +1713,7 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn isByRef(ty: Type, mod: *const Module) bool { +fn isByRef(ty: Type, mod: *Module) bool { const target = mod.getTarget(); switch (ty.zigTypeTag(mod)) { .Type, @@ -1787,7 +1787,7 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow storing /// it using a single instruction, rather than an unrolled version. -fn determineSimdStoreStrategy(ty: Type, mod: *const Module) SimdStoreStrategy { +fn determineSimdStoreStrategy(ty: Type, mod: *Module) SimdStoreStrategy { std.debug.assert(ty.zigTypeTag(mod) == .Vector); if (ty.bitSize(mod) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; @@ -3121,7 +3121,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}), } } else { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return func.lowerConstant(val, int_tag_ty); } }, @@ -3235,7 +3235,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// Returns a `Value` as a signed 32 bit value. /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. -fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { +fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) !i32 { const mod = func.bin_file.base.options.module.?; switch (ty.zigTypeTag(mod)) { .Enum => { @@ -3257,7 +3257,7 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { else => unreachable, } } else { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return func.valueAsI32(val, int_tag_ty); } }, @@ -3793,7 +3793,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { for (items, 0..) |ref, i| { const item_val = (try func.air.value(ref, mod)).?; - const int_val = func.valueAsI32(item_val, target_ty); + const int_val = try func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?)
{ lowest_maybe = int_val; } @@ -6814,7 +6814,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { return loc.index; } - const int_tag_ty = enum_ty.intTagType(); + const int_tag_ty = try enum_ty.intTagType(mod); if (int_tag_ty.bitSize(mod) > 64) { return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{}); } diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index c7819b0fa654..bb5911382b7e 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -21,7 +21,7 @@ const direct: [2]Class = .{ .direct, .none }; /// Classifies a given Zig type to determine how it must be passed /// or returned as a value within a wasm function. /// When all elements result in `.none`, no value must be passed in or returned. -pub fn classifyType(ty: Type, mod: *const Module) [2]Class { +pub fn classifyType(ty: Type, mod: *Module) [2]Class { const target = mod.getTarget(); if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; switch (ty.zigTypeTag(mod)) { @@ -93,7 +93,7 @@ pub fn classifyType(ty: Type, mod: *const Module) [2]Class { /// Returns the scalar type a given type can represent. /// Asserts given type can be represented as scalar, such as /// a struct with a single scalar field. -pub fn scalarType(ty: Type, mod: *const Module) Type { +pub fn scalarType(ty: Type, mod: *Module) Type { switch (ty.zigTypeTag(mod)) { .Struct => { switch (ty.containerLayout()) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index c5e3410947fa..1cfed06ff18a 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -605,7 +605,7 @@ const FrameAlloc = struct { .ref_count = 0, }; } - fn initType(ty: Type, mod: *const Module) FrameAlloc { + fn initType(ty: Type, mod: *Module) FrameAlloc { return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) }); } }; @@ -2309,7 +2309,7 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b return .{ .load_frame = .{ .index = frame_index } }; } -fn regClassForType(ty: Type, mod: *const Module) RegisterManager.RegisterBitSet { +fn regClassForType(ty: Type, mod: *Module) RegisterManager.RegisterBitSet { return switch (ty.zigTypeTag(mod)) { .Float, .Vector => sse, else => gp, diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index ea75a1f4d2ff..1bae899d33ce 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -12,7 +12,7 @@ pub const Class = enum { float_combine, }; -pub fn classifyWindows(ty: Type, mod: *const Module) Class { +pub fn classifyWindows(ty: Type, mod: *Module) Class { // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017 // "There's a strict one-to-one correspondence between a function call's arguments // and the registers used for those arguments. Any argument that doesn't fit in 8 @@ -68,7 +68,7 @@ pub const Context = enum { ret, arg, other }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none.
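As the doc comment above describes, classifySystemV (continued just below) returns a fixed array of eight class slots, one per eightbyte of the value, with unused slots set to `.none`. A sketch of that shape for a plain integer type, simplified from the full System V rules:

const Class = enum { integer, sse, memory, none };

fn classifyInt(byte_size: u64) [8]Class {
    var result = [1]Class{.none} ** 8;
    if (byte_size > 16) {
        // Values wider than two eightbytes go to memory.
        result[0] = .memory;
        return result;
    }
    result[0] = .integer;
    if (byte_size > 8) result[1] = .integer;
    return result;
}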
-pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class { +pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { const target = mod.getTarget(); const memory_class = [_]Class{ .memory, .none, .none, .none, diff --git a/src/codegen.zig b/src/codegen.zig index 8bd478bf7cde..70df1fc17ba8 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1241,7 +1241,7 @@ pub fn genTypedValue( if (enum_values.count() != 0) { const tag_val = enum_values.keys()[field_index.data]; return genTypedValue(bin_file, src_loc, .{ - .ty = typed_value.ty.intTagType(), + .ty = try typed_value.ty.intTagType(mod), .val = tag_val, }, owner_decl_index); } else { @@ -1251,7 +1251,7 @@ pub fn genTypedValue( else => unreachable, } } else { - const int_tag_ty = typed_value.ty.intTagType(); + const int_tag_ty = try typed_value.ty.intTagType(mod); return genTypedValue(bin_file, src_loc, .{ .ty = int_tag_ty, .val = typed_value.val, @@ -1303,7 +1303,7 @@ pub fn genTypedValue( return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index); } -pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 { +pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; const payload_align = payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); @@ -1314,7 +1314,7 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 { } } -pub fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u64 { +pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; const payload_align = payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index b688aada34d0..3c6f5a9e73e2 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1300,7 +1300,7 @@ pub const DeclGen = struct { } }, else => { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return dg.renderValue(writer, int_tag_ty, val, location); }, } @@ -5198,7 +5198,7 @@ fn fieldLocation( container_ty: Type, field_ptr_ty: Type, field_index: u32, - mod: *const Module, + mod: *Module, ) union(enum) { begin: void, field: CValue, @@ -7722,7 +7722,7 @@ const LowerFnRetTyBuffer = struct { values: [1]Value, payload: Type.Payload.AnonStruct, }; -fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type { +fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *Module) Type { if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn; if (lowersToArray(ret_ty, mod)) { @@ -7740,7 +7740,7 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) T return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void; } -fn lowersToArray(ty: Type, mod: *const Module) bool { +fn lowersToArray(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => return true, else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null, diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 84ddce680912..082340085873 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -292,17 +292,17 @@ pub const CType = extern union { .abi = std.math.log2_int(u32, abi_alignment), }; } - pub fn abiAlign(ty: Type, mod: *const Module) AlignAs { + pub fn abiAlign(ty: Type, mod: *Module) AlignAs { const abi_align = ty.abiAlignment(mod); return 
init(abi_align, abi_align); } - pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *const Module) AlignAs { + pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs { return init( struct_ty.structFieldAlign(field_i, mod), struct_ty.structFieldType(field_i).abiAlignment(mod), ); } - pub fn unionPayloadAlign(union_ty: Type, mod: *const Module) AlignAs { + pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs { const union_obj = union_ty.cast(Type.Payload.Union).?.data; const union_payload_align = union_obj.abiAlignment(mod, false); return init(union_payload_align, union_payload_align); @@ -1897,7 +1897,7 @@ pub const CType = extern union { } } - fn createFromType(store: *Store.Promoted, ty: Type, mod: *const Module, kind: Kind) !CType { + fn createFromType(store: *Store.Promoted, ty: Type, mod: *Module, kind: Kind) !CType { var convert: Convert = undefined; try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } }); return createFromConvert(store, ty, mod, kind, &convert); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 23340b5d349e..5f013c38ec18 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1527,7 +1527,7 @@ pub const Object = struct { }; const field_index_val = Value.initPayload(&buf_field_index.base); - const int_ty = ty.intTagType(); + const int_ty = try ty.intTagType(mod); const int_info = ty.intInfo(mod); assert(int_info.bits != 0); @@ -2805,7 +2805,7 @@ pub const DeclGen = struct { return dg.context.intType(info.bits); }, .Enum => { - const int_ty = t.intTagType(); + const int_ty = try t.intTagType(mod); const bit_count = int_ty.intInfo(mod).bits; assert(bit_count != 0); return dg.context.intType(bit_count); }, @@ -4334,7 +4334,9 @@ pub const DeclGen = struct { const mod = dg.module; const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, - .Enum => ty.intTagType(), + .Enum => ty.intTagType(mod) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), + }, .Float => { if (!is_rmw_xchg) return null; return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8)); }, @@ -5286,7 +5288,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { - .Enum => scalar_ty.intTagType(), + .Enum => try scalar_ty.intTagType(mod), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(mod); @@ -8867,7 +8869,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn}); - const int_tag_ty = enum_ty.intTagType(); + const int_tag_ty = try enum_ty.intTagType(mod); const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)}; const llvm_ret_ty = try self.dg.lowerType(Type.bool); @@ -8950,7 +8952,7 @@ pub const FuncGen = struct { const usize_llvm_ty = try self.dg.lowerType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); - const int_tag_ty = enum_ty.intTagType(); + const int_tag_ty = try enum_ty.intTagType(mod); const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)}; const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False); @@ -10487,7 +10489,7 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ fn llvmFieldIndex( ty: Type, field_index: usize, - mod: *const Module, + mod: *Module, ptr_pl_buf: *Type.Payload.Pointer, ) ?c_uint { // Detects where we inserted extra padding fields so
that we can skip @@ -10564,7 +10566,7 @@ fn llvmFieldIndex( } } -fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool { +fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *Module) bool { if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false; const target = mod.getTarget(); @@ -10593,7 +10595,7 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool } } -fn firstParamSRetSystemV(ty: Type, mod: *const Module) bool { +fn firstParamSRetSystemV(ty: Type, mod: *Module) bool { const class = x86_64_abi.classifySystemV(ty, mod, .ret); if (class[0] == .memory) return true; if (class[0] == .x87 and class[2] != .none) return true; @@ -11041,7 +11043,7 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp fn ccAbiPromoteInt( cc: std.builtin.CallingConvention, - mod: *const Module, + mod: *Module, ty: Type, ) ?std.builtin.Signedness { const target = mod.getTarget(); @@ -11080,7 +11082,7 @@ fn ccAbiPromoteInt( /// This is the one source of truth for whether a type is passed around as an LLVM pointer, /// or as an LLVM value. -fn isByRef(ty: Type, mod: *const Module) bool { +fn isByRef(ty: Type, mod: *Module) bool { // For tuples and structs, if there are more than this many non-void // fields, then we make it byref, otherwise byval. const max_fields_byval = 0; @@ -11159,7 +11161,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { } } -fn isScalar(mod: *const Module, ty: Type) bool { +fn isScalar(mod: *Module, ty: Type) bool { return switch (ty.zigTypeTag(mod)) { .Void, .Bool, @@ -11344,11 +11346,11 @@ fn buildAllocaInner( return alloca; } -fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u1 { +fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 { return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod)); } -fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u1 { +fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 { return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod)); } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3842da5f7bc9..843b67e4262a 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -745,7 +745,7 @@ pub const DeclGen = struct { .Enum => { const int_val = try val.enumToInt(ty, mod); - const int_ty = ty.intTagType(); + const int_ty = try ty.intTagType(mod); try self.lower(int_ty, int_val); }, @@ -1195,7 +1195,7 @@ pub const DeclGen = struct { return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { - const tag_ty = ty.intTagType(); + const tag_ty = try ty.intTagType(mod); return self.resolveType(tag_ty, repr); }, .Float => { @@ -3090,7 +3090,7 @@ pub const DeclGen = struct { break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, .Enum => blk: { - const int_ty = cond_ty.intTagType(); + const int_ty = try cond_ty.intTagType(mod); const int_info = int_ty.intInfo(mod); const backing_bits = self.backingIntBits(int_info.bits) orelse { return self.todo("implement composite int switch", .{}); diff --git a/src/type.zig b/src/type.zig index 68ac50703726..9c8c1f1591f7 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1606,7 +1606,7 @@ pub const Type = struct { /// may return false positives. 
pub fn hasRuntimeBitsAdvanced( ty: Type, - mod: *const Module, + mod: *Module, ignore_comptime_only: bool, strat: AbiAlignmentAdvancedStrat, ) RuntimeBitsError!bool { @@ -1785,7 +1785,7 @@ pub const Type = struct { return enum_simple.fields.count() >= 2; }, .enum_numbered, .enum_nonexhaustive => { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); }, @@ -1850,7 +1850,7 @@ pub const Type = struct { /// true if and only if the type has a well-defined memory layout /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout - pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool { + pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => true, .ptr_type => true, @@ -1952,15 +1952,15 @@ pub const Type = struct { }; } - pub fn hasRuntimeBits(ty: Type, mod: *const Module) bool { + pub fn hasRuntimeBits(ty: Type, mod: *Module) bool { return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; } - pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool { + pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; } - pub fn isFnOrHasRuntimeBits(ty: Type, mod: *const Module) bool { + pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { switch (ty.zigTypeTag(mod)) { .Fn => { const fn_info = ty.fnInfo(); @@ -1980,7 +1980,7 @@ pub const Type = struct { } /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. - pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool { + pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Fn => true, else => return ty.hasRuntimeBitsIgnoreComptime(mod), @@ -2019,11 +2019,11 @@ pub const Type = struct { } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. - pub fn ptrAlignment(ty: Type, mod: *const Module) u32 { + pub fn ptrAlignment(ty: Type, mod: *Module) u32 { return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; } - pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 { + pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 { switch (ty.ip_index) { .none => switch (ty.tag()) { .pointer => { @@ -2072,7 +2072,7 @@ pub const Type = struct { } /// Returns 0 for 0-bit types. - pub fn abiAlignment(ty: Type, mod: *const Module) u32 { + pub fn abiAlignment(ty: Type, mod: *Module) u32 { return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } @@ -2103,7 +2103,7 @@ pub const Type = struct { /// necessary, possibly returning a CompileError. 
pub fn abiAlignmentAdvanced( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { const target = mod.getTarget(); @@ -2320,7 +2320,7 @@ pub const Type = struct { }, .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; }, .@"union" => { @@ -2344,7 +2344,7 @@ pub const Type = struct { fn abiAlignmentAdvancedErrorUnion( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { // This code needs to be kept in sync with the equivalent switch prong @@ -2380,7 +2380,7 @@ pub const Type = struct { fn abiAlignmentAdvancedOptional( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { const target = mod.getTarget(); @@ -2412,7 +2412,7 @@ pub const Type = struct { pub fn abiAlignmentAdvancedUnion( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -2477,7 +2477,7 @@ pub const Type = struct { /// Asserts the type has the ABI size already resolved. /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, mod: *const Module) u64 { + pub fn abiSize(ty: Type, mod: *Module) u64 { return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; } @@ -2494,7 +2494,7 @@ pub const Type = struct { /// necessary, possibly returning a CompileError. pub fn abiSizeAdvanced( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiSizeAdvanced { const target = mod.getTarget(); @@ -2661,7 +2661,7 @@ pub const Type = struct { }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; }, .@"union" => { @@ -2754,7 +2754,7 @@ pub const Type = struct { pub fn abiSizeAdvancedUnion( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -2773,7 +2773,7 @@ pub const Type = struct { fn abiSizeAdvancedOptional( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiSizeAdvanced { const child_ty = ty.optionalChild(mod); @@ -2821,7 +2821,7 @@ pub const Type = struct { ); } - pub fn bitSize(ty: Type, mod: *const Module) u64 { + pub fn bitSize(ty: Type, mod: *Module) u64 { return bitSizeAdvanced(ty, mod, null) catch unreachable; } @@ -2830,7 +2830,7 @@ pub const Type = struct { /// the type is fully resolved, and there will be no error, guaranteed. 
pub fn bitSizeAdvanced( ty: Type, - mod: *const Module, + mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!u64 { const target = mod.getTarget(); @@ -2950,7 +2950,7 @@ pub const Type = struct { }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); }, @@ -3464,11 +3464,11 @@ pub const Type = struct { return union_obj.fields.getIndex(name); } - pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *const Module) bool { + pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod); } - pub fn unionGetLayout(ty: Type, mod: *const Module) Module.Union.Layout { + pub fn unionGetLayout(ty: Type, mod: *Module) Module.Union.Layout { switch (ty.tag()) { .@"union" => { const union_obj = ty.castTag(.@"union").?.data; @@ -4428,24 +4428,18 @@ pub const Type = struct { } /// Asserts the type is an enum or a union. - pub fn intTagType(ty: Type) Type { + pub fn intTagType(ty: Type, mod: *Module) !Type { switch (ty.tag()) { .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty, .enum_simple => { - @panic("TODO move enum_simple to use the intern pool"); - //const enum_simple = ty.castTag(.enum_simple).?.data; - //const field_count = enum_simple.fields.count(); - //const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); - //buffer.* = .{ - // .base = .{ .tag = .int_unsigned }, - // .data = bits, - //}; - //return Type.initPayload(&buffer.base); + const enum_simple = ty.castTag(.enum_simple).?.data; + const field_count = enum_simple.fields.count(); + const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); + return mod.intType(.unsigned, bits); }, .union_tagged => { - @panic("TODO move union_tagged to use the intern pool"); - //return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer), + return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(mod); }, else => unreachable, } @@ -4628,7 +4622,7 @@ pub const Type = struct { } } - pub fn structFieldAlign(ty: Type, index: usize, mod: *const Module) u32 { + pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -4718,7 +4712,7 @@ pub const Type = struct { } } - pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *const Module) u32 { + pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout == .Packed); comptime assert(Type.packed_struct_layout_version == 2); @@ -4750,7 +4744,7 @@ pub const Type = struct { offset: u64 = 0, big_align: u32 = 0, struct_obj: *Module.Struct, - module: *const Module, + module: *Module, pub fn next(it: *StructOffsetIterator) ?FieldOffset { const mod = it.module; @@ -4779,7 +4773,7 @@ pub const Type = struct { /// Get an iterator that iterates over all the struct field, returning the field and /// offset of that field. Asserts that the type is a non-packed struct. 
- pub fn iterateStructOffsets(ty: Type, mod: *const Module) StructOffsetIterator { + pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.haveLayout()); assert(struct_obj.layout != .Packed); @@ -4787,7 +4781,7 @@ pub const Type = struct { } /// Supports structs and unions. - pub fn structFieldOffset(ty: Type, index: usize, mod: *const Module) u64 { + pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -5226,7 +5220,7 @@ pub const Type = struct { pub const VectorIndex = InternPool.Key.PtrType.VectorIndex; - pub fn alignment(data: Data, mod: *const Module) u32 { + pub fn alignment(data: Data, mod: *Module) u32 { if (data.@"align" != 0) return data.@"align"; return abiAlignment(data.pointee_type, mod); } diff --git a/src/value.zig b/src/value.zig index 49ca651a799e..402e0981d3ff 100644 --- a/src/value.zig +++ b/src/value.zig @@ -694,7 +694,7 @@ pub const Value = struct { }, .enum_simple => { // Field index and integer values are the same. - const tag_ty = ty.intTagType(); + const tag_ty = try ty.intTagType(mod); return mod.intValue(tag_ty, field_index); }, else => unreachable, @@ -722,7 +722,9 @@ pub const Value = struct { // auto-numbered enum break :field_index @intCast(u32, val.toUnsignedInt(mod)); } - const int_tag_ty = ty.intTagType(); + const int_tag_ty = ty.intTagType(mod) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO handle this failure + }; break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?); }, }; @@ -737,7 +739,7 @@ pub const Value = struct { } /// Asserts the value is an integer. - pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *const Module) BigIntConst { + pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { return val.toBigIntAdvanced(space, mod, null) catch unreachable; } @@ -745,7 +747,7 @@ pub const Value = struct { pub fn toBigIntAdvanced( val: Value, space: *BigIntSpace, - mod: *const Module, + mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { return switch (val.ip_index) { @@ -801,13 +803,13 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. - pub fn getUnsignedInt(val: Value, mod: *const Module) ?u64 { + pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { return getUnsignedIntAdvanced(val, mod, null) catch unreachable; } /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
- pub fn getUnsignedIntAdvanced(val: Value, mod: *const Module, opt_sema: ?*Sema) !?u64 { + pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, @@ -847,12 +849,12 @@ pub const Value = struct { } /// Asserts the value is an integer and it fits in a u64 - pub fn toUnsignedInt(val: Value, mod: *const Module) u64 { + pub fn toUnsignedInt(val: Value, mod: *Module) u64 { return getUnsignedInt(val, mod).?; } /// Asserts the value is an integer and it fits in a i64 - pub fn toSignedInt(val: Value, mod: *const Module) i64 { + pub fn toSignedInt(val: Value, mod: *Module) i64 { switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, @@ -1405,7 +1407,7 @@ pub const Value = struct { } } - pub fn clz(val: Value, ty: Type, mod: *const Module) u64 { + pub fn clz(val: Value, ty: Type, mod: *Module) u64 { const ty_bits = ty.intInfo(mod).bits; return switch (val.ip_index) { .bool_false => ty_bits, @@ -1435,7 +1437,7 @@ pub const Value = struct { }; } - pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 { + pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { const ty_bits = ty.intInfo(mod).bits; return switch (val.ip_index) { .bool_false => ty_bits, @@ -1468,7 +1470,7 @@ pub const Value = struct { }; } - pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 { + pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { assert(!val.isUndef()); switch (val.ip_index) { .bool_false => return 0, @@ -1527,7 +1529,7 @@ pub const Value = struct { /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. - pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize { + pub fn intBitCountTwosComp(self: Value, mod: *Module) usize { const target = mod.getTarget(); return switch (self.ip_index) { .bool_false => 0, @@ -1593,13 +1595,13 @@ pub const Value = struct { }; } - pub fn orderAgainstZero(lhs: Value, mod: *const Module) std.math.Order { + pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; } pub fn orderAgainstZeroAdvanced( lhs: Value, - mod: *const Module, + mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { switch (lhs.ip_index) { @@ -1683,13 +1685,13 @@ pub const Value = struct { } /// Asserts the value is comparable. - pub fn order(lhs: Value, rhs: Value, mod: *const Module) std.math.Order { + pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { return orderAdvanced(lhs, rhs, mod, null) catch unreachable; } /// Asserts the value is comparable. /// If opt_sema is null then this function asserts things are resolved and cannot fail. - pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *const Module, opt_sema: ?*Sema) !std.math.Order { + pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order { const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); switch (lhs_against_zero) { @@ -1734,7 +1736,7 @@ pub const Value = struct { /// Asserts the value is comparable. Does not take a type parameter because it supports /// comparisons between heterogeneous types. 
- pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *const Module) bool { + pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; } @@ -1742,7 +1744,7 @@ pub const Value = struct { lhs: Value, op: std.math.CompareOperator, rhs: Value, - mod: *const Module, + mod: *Module, opt_sema: ?*Sema, ) !bool { if (lhs.pointerDecl()) |lhs_decl| { @@ -2047,7 +2049,7 @@ pub const Value = struct { .Enum => { const a_val = try a.enumToInt(ty, mod); const b_val = try b.enumToInt(ty, mod); - const int_ty = ty.intTagType(); + const int_ty = try ty.intTagType(mod); return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, .Array, .Vector => { @@ -2462,7 +2464,7 @@ pub const Value = struct { }; } - fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void { + fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void { var buffer: BigIntSpace = undefined; const big = int_val.toBigInt(&buffer, mod); std.hash.autoHash(hasher, big.positive); @@ -2471,7 +2473,7 @@ pub const Value = struct { } } - fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void { + fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void { switch (ptr_val.tag()) { .decl_ref, .decl_ref_mut, From 4fe0c583be8890b1cb8059c2daff3bd82c53d2e9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 16:14:08 -0700 Subject: [PATCH 048/205] stage2: more InternPool-related fixes --- src/Module.zig | 1 + src/Sema.zig | 20 ++++++++++++++------ src/type.zig | 23 +++++++++++++++-------- src/value.zig | 37 +++++++++++++++++++++++++++---------- 4 files changed, 57 insertions(+), 24 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index ef38e6ff0623..d06d22402a94 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6716,6 +6716,7 @@ fn reportRetryableFileError( } pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { + if (val.ip_index != .none) return; switch (val.tag()) { .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index), .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl), diff --git a/src/Sema.zig b/src/Sema.zig index 872570493788..b8b0a6513f23 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -12307,8 +12307,7 @@ fn zirShl( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try mod.intValue(scalar_ty, bit_count); - + const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); @@ -27391,10 +27390,19 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const prev_ptr = while (air_tags[ptr_inst] == .bitcast) { const prev_ptr = air_datas[ptr_inst].ty_op.operand; const prev_ptr_ty = sema.typeOf(prev_ptr); - const prev_ptr_child_ty = switch (prev_ptr_ty.tag()) { - .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type, - else => return null, - }; + if (prev_ptr_ty.zigTypeTag(mod) != .Pointer) return null; + + // TODO: I noticed that the behavior tests do not pass if these two + // checks are missing. 
I don't understand why the presence of inferred + // allocations is relevant to this function, or why it would have + // different behavior depending on whether the types were inferred. + // Something seems wrong here. + if (prev_ptr_ty.ip_index == .none) { + if (prev_ptr_ty.tag() == .inferred_alloc_mut) return null; + if (prev_ptr_ty.tag() == .inferred_alloc_const) return null; + } + + const prev_ptr_child_ty = prev_ptr_ty.childType(mod); if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr; ptr_inst = Air.refToIndex(prev_ptr) orelse return null; } else return null; diff --git a/src/type.zig b/src/type.zig index 9c8c1f1591f7..b271a3ea4592 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2058,16 +2058,23 @@ pub const Type = struct { } } - pub fn ptrAddressSpace(self: Type, mod: *const Module) std.builtin.AddressSpace { - return switch (self.tag()) { - .pointer => self.castTag(.pointer).?.data.@"addrspace", + pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.@"addrspace", - .optional => { - const child_type = self.optionalChild(mod); - return child_type.ptrAddressSpace(mod); - }, + .optional => { + const child_type = ty.optionalChild(mod); + return child_type.ptrAddressSpace(mod); + }, - else => unreachable, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.address_space, + .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space, + else => unreachable, + }, }; } diff --git a/src/value.zig b/src/value.zig index 402e0981d3ff..d771f53a3ed9 100644 --- a/src/value.zig +++ b/src/value.zig @@ -644,15 +644,32 @@ pub const Value = struct { /// Asserts the type is an enum type. pub fn toEnum(val: Value, comptime E: type) E { - switch (val.tag()) { - .enum_field_index => { - const field_index = val.castTag(.enum_field_index).?.data; - return @intToEnum(E, field_index); + switch (val.ip_index) { + .calling_convention_c => { + if (E == std.builtin.CallingConvention) { + return .C; + } else { + unreachable; + } + }, + .calling_convention_inline => { + if (E == std.builtin.CallingConvention) { + return .Inline; + } else { + unreachable; + } }, - .the_only_possible_value => { - const fields = std.meta.fields(E); - assert(fields.len == 1); - return @intToEnum(E, fields[0].value); + .none => switch (val.tag()) { + .enum_field_index => { + const field_index = val.castTag(.enum_field_index).?.data; + return @intToEnum(E, field_index); + }, + .the_only_possible_value => { + const fields = std.meta.fields(E); + assert(fields.len == 1); + return @intToEnum(E, fields[0].value); + }, + else => unreachable, }, else => unreachable, } @@ -2177,7 +2194,7 @@ pub const Value = struct { std.hash.autoHash(hasher, zig_ty_tag); if (val.isUndef()) return; // The value is runtime-known and shouldn't affect the hash. - if (val.tag() == .runtime_value) return; + if (val.isRuntimeValue()) return; switch (zig_ty_tag) { .Opaque => unreachable, // Cannot hash opaque types @@ -2323,7 +2340,7 @@ pub const Value = struct { pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { if (val.isUndef()) return; // The value is runtime-known and shouldn't affect the hash. 
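
A minimal sketch (not part of the patch) of the migration pattern these hunks apply repeatedly, such as the ptrAddressSpace and toEnum changes just above: a Type or Value that has not yet been migrated carries ip_index == .none and is decoded through its legacy tag payload, while a migrated one is decoded through the InternPool. `Result` and the prong bodies here are placeholders, not real compiler code.

    fn query(ty: Type, mod: *Module) Result {
        return switch (ty.ip_index) {
            // Legacy, payload-based representation.
            .none => switch (ty.tag()) {
                else => unreachable, // one prong per legacy tag
            },
            // Canonical, interned representation.
            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                else => unreachable, // one prong per InternPool key
            },
        };
    }
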
- if (val.tag() == .runtime_value) return; + if (val.isRuntimeValue()) return; switch (ty.zigTypeTag(mod)) { .Opaque => unreachable, // Cannot hash opaque types From 2f9b7dc1023e9ff21574b559e22265db65e00d2d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 16:29:42 -0700 Subject: [PATCH 049/205] InternPool: add an int_u8 value encoding On a simple input file, this had a total savings of 21% in the InternPool: Before: int_positive: 427 occurrences, 8975 total bytes After: int_positive: 258 occurrences, 5426 total bytes int_u8: 169 occurrences, 845 total bytes --- src/InternPool.zig | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/InternPool.zig b/src/InternPool.zig index c60f58e20757..4f2e792a4968 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -721,6 +721,9 @@ pub const Tag = enum(u8) { /// only an enum tag, but will be presented via the API with a different Key. /// data is SimpleInternal enum value. simple_internal, + /// Type: u8 + /// data is integer value + int_u8, /// Type: u16 /// data is integer value int_u16, @@ -1056,6 +1059,10 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), .simple_internal => @panic("TODO"), + .int_u8 => .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = data }, + } }, .int_u16 => .{ .int = .{ .ty = .u16_type, .storage = .{ .u64 = data }, @@ -1226,6 +1233,22 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .int => |int| b: { switch (int.ty) { .none => unreachable, + .u8_type => switch (int.storage) { + .big_int => |big_int| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u8, + .data = big_int.to(u8) catch unreachable, + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u8, + .data = @intCast(u8, x), + }); + break :b; + }, + }, .u16_type => switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { @@ -1678,6 +1701,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .simple_type => 0, .simple_value => 0, .simple_internal => 0, + .int_u8 => 0, .int_u16 => 0, .int_u32 => 0, .int_i32 => 0, From 3116477dcc5e85d8fe7b2be2f332796e1425f956 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 21:48:55 -0700 Subject: [PATCH 050/205] stage2: move empty struct type and value to InternPool --- src/Sema.zig | 1035 ++++++++-------- src/TypedValue.zig | 129 +- src/codegen/c.zig | 13 +- src/type.zig | 2908 ++++++++++++++++++++++---------------------- src/value.zig | 32 +- 5 files changed, 2079 insertions(+), 2038 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index b8b0a6513f23..0085b16ae1e6 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -12615,7 +12615,7 @@ fn analyzeTupleCat( const dest_fields = lhs_len + rhs_len; if (dest_fields == 0) { - return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value)); + return sema.addConstant(Type.empty_struct_literal, Value.empty_struct); } if (lhs_len == 0) { return rhs; @@ -12943,7 +12943,7 @@ fn analyzeTupleMul( return sema.fail(block, rhs_src, "operation results in overflow", .{}); if (final_len_u64 == 0) { - return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value)); + return sema.addConstant(Type.empty_struct_literal, Value.empty_struct); } const final_len = try sema.usizeCast(block, rhs_src, final_len_u64); @@ -21860,7 +21860,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, 
inst: Zir.Inst.Index) CompileError const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); - if (!args_ty.isTuple() and args_ty.tag() != .empty_struct_literal) { + if (!args_ty.isTuple() and args_ty.ip_index != .empty_struct_type) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)}); } @@ -24780,37 +24780,41 @@ fn structFieldVal( assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - switch (struct_ty.tag()) { - .tuple, .empty_struct_literal => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), - .anon_struct => { - const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); - return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); - }, - .@"struct" => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); - - const field_index_usize = struct_obj.fields.getIndex(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index = @intCast(u32, field_index_usize); - const field = struct_obj.fields.values()[field_index]; + switch (struct_ty.ip_index) { + .empty_struct_type => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), + .none => switch (struct_ty.tag()) { + .tuple => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), + .anon_struct => { + const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); + return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); + }, + .@"struct" => { + const struct_obj = struct_ty.castTag(.@"struct").?.data; + if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); - if (field.is_comptime) { - return sema.addConstant(field.ty, field.default_val); - } + const field_index_usize = struct_obj.fields.getIndex(field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); + const field_index = @intCast(u32, field_index_usize); + const field = struct_obj.fields.values()[field_index]; - if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { - if (struct_val.isUndef()) return sema.addConstUndef(field.ty); - if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { - return sema.addConstant(field.ty, opv); + if (field.is_comptime) { + return sema.addConstant(field.ty, field.default_val); } - const field_values = struct_val.castTag(.aggregate).?.data; - return sema.addConstant(field.ty, field_values[field_index]); - } + if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { + if (struct_val.isUndef()) return sema.addConstUndef(field.ty); + if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { + return sema.addConstant(field.ty, opv); + } + + const field_values = struct_val.castTag(.aggregate).?.data; + return sema.addConstant(field.ty, field_values[field_index]); + } - try sema.requireRuntimeBlock(block, src, null); - return block.addStructFieldVal(struct_byval, field_index, field.ty); + try sema.requireRuntimeBlock(block, src, null); + return block.addStructFieldVal(struct_byval, field_index, field.ty); + }, + else => unreachable, }, else => unreachable, } @@ -27848,6 
+27852,19 @@ fn beginComptimePtrMutation( else => unreachable, } }, + .empty_struct => { + const duped = try sema.arena.create(Value); + duped.* = Value.initTag(.the_only_possible_value); + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index), + duped, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, .none => switch (val_ptr.tag()) { .aggregate => return beginComptimePtrMutationInner( sema, @@ -27901,20 +27918,6 @@ fn beginComptimePtrMutation( else => unreachable, }, - .empty_struct_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - else => unreachable, }, else => unreachable, @@ -31502,174 +31505,174 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => false, - .ptr_type => |ptr_type| { - const child_ty = ptr_type.elem_type.toType(); - if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; - } else { - return sema.resolveTypeRequiresComptime(child_ty); - } - }, - .array_type => |array_type| return sema.resolveTypeRequiresComptime(array_type.child.toType()), - .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), - .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), - .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .void, - .anyerror, - .@"anyframe", - .noreturn, - .generic_poison, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, + return switch (ty.ip_index) { + .empty_struct_type => false, + .none => switch (ty.tag()) { + .empty_struct, + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + .@"opaque", + .enum_simple, => false, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - .type_info, - => true, - - .var_args_param => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; - - return switch (ty.tag()) { - .empty_struct_literal, - .empty_struct, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .enum_simple, - => false, + .function => true, - .function => true, + .inferred_alloc_mut => unreachable, + .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, + .array, + .array_sentinel, + => return sema.resolveTypeRequiresComptime(ty.childType(mod)), - .array, - .array_sentinel, - => return sema.resolveTypeRequiresComptime(ty.childType(mod)), + .pointer => { + const child_ty = ty.childType(mod); + if (child_ty.zigTypeTag(mod) == 
.Fn) { + return child_ty.fnInfo().is_generic; + } else { + return sema.resolveTypeRequiresComptime(child_ty); + } + }, - .pointer => { - const child_ty = ty.childType(mod); - if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; - } else { - return sema.resolveTypeRequiresComptime(child_ty); - } - }, + .optional => { + return sema.resolveTypeRequiresComptime(ty.optionalChild(mod)); + }, - .optional => { - return sema.resolveTypeRequiresComptime(ty.optionalChild(mod)); - }, + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); + for (tuple.types, 0..) |field_ty, i| { + const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; + if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) { + return true; + } + } + return false; + }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) { - return true; + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + switch (struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + struct_obj.requires_comptime = .yes; + } else { + struct_obj.requires_comptime = .no; + } + return requires_comptime; + }, } - } - return false; - }, + }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var requires_comptime = false; - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - struct_obj.requires_comptime = .yes; - } else { - struct_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } - }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + switch (union_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + union_obj.requires_comptime = .wip; + for (union_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + union_obj.requires_comptime = .yes; + } else { + union_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var requires_comptime = false; - union_obj.requires_comptime = .wip; - for (union_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - union_obj.requires_comptime = .yes; - } else { - union_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } + .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), + .anyframe_T => { + const child_ty = ty.castTag(.anyframe_T).?.data; + return sema.resolveTypeRequiresComptime(child_ty); + }, + 
.enum_numbered => { + const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; + return sema.resolveTypeRequiresComptime(tag_ty); + }, + .enum_full, .enum_nonexhaustive => { + const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; + return sema.resolveTypeRequiresComptime(tag_ty); + }, }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => false, + .ptr_type => |ptr_type| { + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { + return child_ty.fnInfo().is_generic; + } else { + return sema.resolveTypeRequiresComptime(child_ty); + } + }, + .array_type => |array_type| return sema.resolveTypeRequiresComptime(array_type.child.toType()), + .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), + .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), + .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .@"anyframe", + .noreturn, + .generic_poison, + .var_args_param, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return sema.resolveTypeRequiresComptime(child_ty); - }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type }, }; } @@ -32957,237 +32960,240 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const mod = sema.mod; - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| { - if (int_type.bits == 0) { - return try mod.intValue(ty, 0); - } else { - return null; - } - }, - .ptr_type => return null, - .array_type => |array_type| { - if (array_type.len == 0) - return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(array_type.child.toType())) != null) { - return Value.initTag(.the_only_possible_value); - } - return null; - }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return Value.initTag(.empty_array); - if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v; - return null; - }, - .opt_type => |child| { - if (child.toType().isNoReturn()) { - return Value.null; - } else { - return null; - } - }, - .error_union_type => return null, - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - 
.c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .type, - .anyerror, - .comptime_int, - .comptime_float, - .@"anyframe", - .enum_literal, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => return null, + switch (ty.ip_index) { + .empty_struct_type => return Value.empty_struct, - .void => return Value.void, - .noreturn => return Value.@"unreachable", - .null => return Value.null, - .undefined => return Value.undef, + .none => switch (ty.tag()) { + .error_set_single, + .error_set, + .error_set_merged, + .error_union, + .function, + .array_sentinel, + .error_set_inferred, + .@"opaque", + .anyframe_T, + .pointer, + => return null, - .generic_poison => return error.GenericPoison, - .var_args_param => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; + .optional => { + const child_ty = ty.optionalChild(mod); + if (child_ty.isNoReturn()) { + return Value.null; + } else { + return null; + } + }, - switch (ty.tag()) { - .error_set_single, - .error_set, - .error_set_merged, - .error_union, - .function, - .array_sentinel, - .error_set_inferred, - .@"opaque", - .anyframe_T, - .pointer, - => return null, + .@"struct" => { + const resolved_ty = try sema.resolveTypeFields(ty); + const s = resolved_ty.castTag(.@"struct").?.data; + for (s.fields.values(), 0..) |field, i| { + if (field.is_comptime) continue; + if (field.ty.eql(resolved_ty, sema.mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + s.srcLoc(sema.mod), + "struct '{}' depends on itself", + .{ty.fmt(sema.mod)}, + ); + try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(msg); + } + if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { + return null; + } + } + return Value.empty_struct; + }, - .optional => { - const child_ty = ty.optionalChild(mod); - if (child_ty.isNoReturn()) { - return Value.null; - } else { - return null; - } - }, + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); + for (tuple.values, 0..) |val, i| { + const is_comptime = val.ip_index != .unreachable_value; + if (is_comptime) continue; + if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue; + return null; + } + return Value.empty_struct; + }, - .@"struct" => { - const resolved_ty = try sema.resolveTypeFields(ty); - const s = resolved_ty.castTag(.@"struct").?.data; - for (s.fields.values(), 0..) |field, i| { - if (field.is_comptime) continue; - if (field.ty.eql(resolved_ty, sema.mod)) { + .enum_numbered => { + const resolved_ty = try sema.resolveTypeFields(ty); + const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; + // An explicit tag type is always provided for enum_numbered. 
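
Picking up the int_u8 encoding introduced in the previous patch ("InternPool: add an int_u8 value encoding"): the reported 21% savings comes from storing small integers directly in the 32-bit data field of the item, instead of referencing separately stored big-int limbs. A minimal sketch of that idea, assuming a simplified item layout; `addLimbsToExtra` is a hypothetical helper and this is not the real InternPool code.

    const Item = struct {
        tag: enum(u8) { int_u8, int_positive },
        data: u32, // int_u8: the value itself; int_positive: index into extra storage
    };

    fn intern(value: u64) Item {
        if (value <= std.math.maxInt(u8)) {
            // Fits inline: no bytes beyond the item itself.
            return .{ .tag = .int_u8, .data = @intCast(u32, value) };
        }
        // General encoding: pays for big-int limbs in a side array.
        // addLimbsToExtra is hypothetical, standing in for the real storage path.
        return .{ .tag = .int_positive, .data = addLimbsToExtra(value) };
    }
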
+ if (!(try sema.typeHasRuntimeBits(enum_obj.tag_ty))) { + return null; + } + if (enum_obj.fields.count() == 1) { + if (enum_obj.values.count() == 0) { + return try mod.intValue(ty, 0); // auto-numbered + } else { + return enum_obj.values.keys()[0]; + } + } else { + return null; + } + }, + .enum_full => { + const resolved_ty = try sema.resolveTypeFields(ty); + const enum_obj = resolved_ty.castTag(.enum_full).?.data; + if (!(try sema.typeHasRuntimeBits(enum_obj.tag_ty))) { + return null; + } + switch (enum_obj.fields.count()) { + 0 => return Value.@"unreachable", + 1 => if (enum_obj.values.count() == 0) { + return try mod.intValue(ty, 0); // auto-numbered + } else { + return enum_obj.values.keys()[0]; + }, + else => return null, + } + }, + .enum_simple => { + const resolved_ty = try sema.resolveTypeFields(ty); + const enum_simple = resolved_ty.castTag(.enum_simple).?.data; + switch (enum_simple.fields.count()) { + 0 => return Value.@"unreachable", + 1 => return try mod.intValue(ty, 0), + else => return null, + } + }, + .enum_nonexhaustive => { + const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; + if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { + return try mod.intValue(ty, 0); + } else { + return null; + } + }, + .@"union", .union_safety_tagged, .union_tagged => { + const resolved_ty = try sema.resolveTypeFields(ty); + const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; + const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse + return null; + const fields = union_obj.fields.values(); + if (fields.len == 0) return Value.@"unreachable"; + const only_field = fields[0]; + if (only_field.ty.eql(resolved_ty, sema.mod)) { const msg = try Module.ErrorMsg.create( sema.gpa, - s.srcLoc(sema.mod), - "struct '{}' depends on itself", + union_obj.srcLoc(sema.mod), + "union '{}' depends on itself", .{ty.fmt(sema.mod)}, ); - try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); + try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); return sema.failWithOwnedErrorMsg(msg); } - if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { + const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse return null; - } - } - return Value.initTag(.empty_struct_value); - }, + // TODO make this not allocate. + return try Value.Tag.@"union".create(sema.arena, .{ + .tag = tag_val, + .val = val_val, + }); + }, + + .empty_struct => return Value.empty_struct, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) |val, i| { - const is_comptime = val.ip_index != .unreachable_value; - if (is_comptime) continue; - if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue; + .array => { + if (ty.arrayLen(mod) == 0) + return Value.initTag(.empty_array); + if ((try sema.typeHasOnePossibleValue(ty.childType(mod))) != null) { + return Value.initTag(.the_only_possible_value); + } return null; - } - return Value.initTag(.empty_struct_value); + }, + + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, - .enum_numbered => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. 
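
The enum rule implemented above is observable from user code: an enum whose tag type has no runtime bits (a single tag yields a u0 tag type) has exactly one possible value, so the type occupies zero bytes, while two tags already force a runtime bit. A small self-contained check, not from the patch:

    const std = @import("std");

    const OnlyOne = enum { a }; // tag type u0; the value is always .a
    const TwoTags = enum { a, b }; // tag type u1; needs a runtime bit

    comptime {
        std.debug.assert(@sizeOf(OnlyOne) == 0);
        std.debug.assert(@sizeOf(TwoTags) == 1);
    }
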
- if (!(try sema.typeHasRuntimeBits(enum_obj.tag_ty))) { - return null; - } - if (enum_obj.fields.count() == 1) { - if (enum_obj.values.count() == 0) { - return try mod.intValue(ty, 0); // auto-numbered + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return try mod.intValue(ty, 0); } else { - return enum_obj.values.keys()[0]; + return null; + } + }, + .ptr_type => return null, + .array_type => |array_type| { + if (array_type.len == 0) + return Value.initTag(.empty_array); + if ((try sema.typeHasOnePossibleValue(array_type.child.toType())) != null) { + return Value.initTag(.the_only_possible_value); } - } else { return null; - } - }, - .enum_full => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_full).?.data; - if (!(try sema.typeHasRuntimeBits(enum_obj.tag_ty))) { + }, + .vector_type => |vector_type| { + if (vector_type.len == 0) return Value.initTag(.empty_array); + if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v; return null; - } - switch (enum_obj.fields.count()) { - 0 => return Value.@"unreachable", - 1 => if (enum_obj.values.count() == 0) { - return try mod.intValue(ty, 0); // auto-numbered + }, + .opt_type => |child| { + if (child.toType().isNoReturn()) { + return Value.null; } else { - return enum_obj.values.keys()[0]; - }, - else => return null, - } - }, - .enum_simple => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_simple = resolved_ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.@"unreachable", - 1 => return try mod.intValue(ty, 0), - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return try mod.intValue(ty, 0); - } else { - return null; - } - }, - .@"union", .union_safety_tagged, .union_tagged => { - const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; - const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse - return null; - const fields = union_obj.fields.values(); - if (fields.len == 0) return Value.@"unreachable"; - const only_field = fields[0]; - if (only_field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - union_obj.srcLoc(sema.mod), - "union '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse - return null; - // TODO make this not allocate. The function in `Type.onePossibleValue` - // currently returns `empty_struct_value` and we should do that here too. 
- return try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = val_val, - }); - }, + return null; + } + }, + .error_union_type => return null, + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .@"anyframe", + .enum_literal, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + => return null, - .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), + .void => return Value.void, + .noreturn => return Value.@"unreachable", + .null => return Value.null, + .undefined => return Value.undef, - .array => { - if (ty.arrayLen(mod) == 0) - return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(ty.childType(mod))) != null) { - return Value.initTag(.the_only_possible_value); - } - return null; + .generic_poison => return error.GenericPoison, + .var_args_param => unreachable, + }, + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, } } @@ -33567,8 +33573,116 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// elsewhere in value.zig pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - if (ty.ip_index != .none) { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (ty.ip_index) { + .empty_struct_type => false, + + .none => switch (ty.tag()) { + .empty_struct, + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + .@"opaque", + .enum_simple, + => false, + + .function => true, + + .inferred_alloc_mut => unreachable, + .inferred_alloc_const => unreachable, + + .array, + .array_sentinel, + => return sema.typeRequiresComptime(ty.childType(mod)), + + .pointer => { + const child_ty = ty.childType(mod); + if (child_ty.zigTypeTag(mod) == .Fn) { + return child_ty.fnInfo().is_generic; + } else { + return sema.typeRequiresComptime(child_ty); + } + }, + + .optional => { + return sema.typeRequiresComptime(ty.optionalChild(mod)); + }, + + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); + for (tuple.types, 0..) 
|field_ty, i| { + const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; + if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) { + return true; + } + } + return false; + }, + + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + switch (struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (struct_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsStruct(ty, struct_obj); + + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (try sema.typeRequiresComptime(field.ty)) { + struct_obj.requires_comptime = .yes; + return true; + } + } + struct_obj.requires_comptime = .no; + return false; + }, + } + }, + + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + switch (union_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (union_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsUnion(ty, union_obj); + + union_obj.requires_comptime = .wip; + for (union_obj.fields.values()) |field| { + if (try sema.typeRequiresComptime(field.ty)) { + union_obj.requires_comptime = .yes; + return true; + } + } + union_obj.requires_comptime = .no; + return false; + }, + } + }, + + .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), + .anyframe_T => { + const child_ty = ty.castTag(.anyframe_T).?.data; + return sema.typeRequiresComptime(child_ty); + }, + .enum_numbered => { + const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; + return sema.typeRequiresComptime(tag_ty); + }, + .enum_full, .enum_nonexhaustive => { + const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; + return sema.typeRequiresComptime(tag_ty); + }, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return false, .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); @@ -33638,113 +33752,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .extern_func => unreachable, .int => unreachable, .enum_tag => unreachable, // it's a value, not a type - } - } - return switch (ty.tag()) { - .empty_struct_literal, - .empty_struct, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .enum_simple, - => false, - - .function => true, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .array, - .array_sentinel, - => return sema.typeRequiresComptime(ty.childType(mod)), - - .pointer => { - const child_ty = ty.childType(mod); - if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; - } else { - return sema.typeRequiresComptime(child_ty); - } - }, - - .optional => { - return sema.typeRequiresComptime(ty.optionalChild(mod)); - }, - - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) 
|field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) { - return true; - } - } - return false; - }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - if (struct_obj.status == .field_types_wip) - return false; - - try sema.resolveTypeFieldsStruct(ty, struct_obj); - - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try sema.typeRequiresComptime(field.ty)) { - struct_obj.requires_comptime = .yes; - return true; - } - } - struct_obj.requires_comptime = .no; - return false; - }, - } - }, - - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - if (union_obj.status == .field_types_wip) - return false; - - try sema.resolveTypeFieldsUnion(ty, union_obj); - - union_obj.requires_comptime = .wip; - for (union_obj.fields.values()) |field| { - if (try sema.typeRequiresComptime(field.ty)) { - union_obj.requires_comptime = .yes; - return true; - } - } - union_obj.requires_comptime = .no; - return false; - }, - } - }, - - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return sema.typeRequiresComptime(child_ty); - }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.typeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.typeRequiresComptime(tag_ty); }, }; } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 828fb610d492..fae637cf2484 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -80,67 +80,9 @@ pub fn print( return writer.writeAll("(variable)"); while (true) switch (val.ip_index) { + .empty_struct => return printAggregate(ty, val, writer, level, mod), .none => switch (val.tag()) { - .empty_struct_value, .aggregate => { - if (level == 0) { - return writer.writeAll(".{ ... 
}"); - } - if (ty.zigTypeTag(mod) == .Struct) { - try writer.writeAll(".{"); - const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); - - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - switch (ty.tag()) { - .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), - else => {}, - } - try print(.{ - .ty = ty.structFieldType(i), - .val = try val.fieldValue(ty, mod, i), - }, writer, level - 1, mod); - } - if (ty.structFieldCount() > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll("}"); - } else { - const elem_ty = ty.elemType2(mod); - const len = ty.arrayLen(mod); - - if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); - var buf: [max_string_len]u8 = undefined; - - var i: u32 = 0; - while (i < max_len) : (i += 1) { - const elem = try val.fieldValue(ty, mod, i); - if (elem.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; - } - - const truncated = if (len > max_string_len) " (truncated)" else ""; - return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); - } - - try writer.writeAll(".{ "); - - const max_len = std.math.min(len, max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - try print(.{ - .ty = elem_ty, - .val = try val.fieldValue(ty, mod, i), - }, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - } - }, + .aggregate => return printAggregate(ty, val, writer, level, mod), .@"union" => { if (level == 0) { return writer.writeAll(".{ ... }"); @@ -426,3 +368,70 @@ pub fn print( }, }; } + +fn printAggregate( + ty: Type, + val: Value, + writer: anytype, + level: u8, + mod: *Module, +) (@TypeOf(writer).Error || Allocator.Error)!void { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + if (ty.zigTypeTag(mod) == .Struct) { + try writer.writeAll(".{"); + const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + switch (ty.tag()) { + .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), + else => {}, + } + try print(.{ + .ty = ty.structFieldType(i), + .val = try val.fieldValue(ty, mod, i), + }, writer, level - 1, mod); + } + if (ty.structFieldCount() > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll("}"); + } else { + const elem_ty = ty.elemType2(mod); + const len = ty.arrayLen(mod); + + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @intCast(usize, std.math.min(len, max_string_len)); + var buf: [max_string_len]u8 = undefined; + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + const elem = try val.fieldValue(ty, mod, i); + if (elem.isUndef()) break :str; + buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; + } + + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + + try writer.writeAll(".{ "); + + const max_len = std.math.min(len, max_aggregate_items); + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(.{ + .ty = elem_ty, + .val = try val.fieldValue(ty, mod, i), + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + } +} diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 3c6f5a9e73e2..60f2d86a3d1d 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1127,8 +1127,19 @@ pub const DeclGen = struct { try writer.writeByte('}'); return; }, + .empty_struct => { + const ai = ty.arrayInfo(mod); + try writer.writeByte('{'); + if (ai.sentinel) |s| { + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } else { + try writer.writeByte('0'); + } + try writer.writeByte('}'); + return; + }, .none => switch (val.tag()) { - .empty_struct_value, .empty_array => { + .empty_array => { const ai = ty.arrayInfo(mod); try writer.writeByte('{'); if (ai.sentinel) |s| { diff --git a/src/type.zig b/src/type.zig index b271a3ea4592..d784a25eb3d9 100644 --- a/src/type.zig +++ b/src/type.zig @@ -34,8 +34,51 @@ pub const Type = struct { } pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { - if (ty.ip_index != .none) { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + => return .ErrorSet, + + .@"opaque" => return .Opaque, + + .function => return .Fn, + + .array, + .array_sentinel, + => return .Array, + + .pointer, + .inferred_alloc_const, + .inferred_alloc_mut, + => return .Pointer, + + .optional => return .Optional, + + .error_union => return .ErrorUnion, + + .anyframe_T => return .AnyFrame, + + .empty_struct, + .@"struct", + .tuple, + .anon_struct, + => return .Struct, + + .enum_full, + .enum_nonexhaustive, + .enum_simple, + .enum_numbered, + => return .Enum, + + .@"union", + .union_safety_tagged, + .union_tagged, + => return .Union, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return .Int, .ptr_type => return .Pointer, .array_type => return .Array, @@ -104,51 +147,7 @@ pub const Type = struct { 
.enum_tag, .simple_value, => unreachable, // it's a value, not a type - } - } - switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => return .ErrorSet, - - .@"opaque" => return .Opaque, - - .function => return .Fn, - - .array, - .array_sentinel, - => return .Array, - - .pointer, - .inferred_alloc_const, - .inferred_alloc_mut, - => return .Pointer, - - .optional => return .Optional, - - .error_union => return .ErrorUnion, - - .anyframe_T => return .AnyFrame, - - .empty_struct, - .empty_struct_literal, - .@"struct", - .tuple, - .anon_struct, - => return .Struct, - - .enum_full, - .enum_nonexhaustive, - .enum_simple, - .enum_numbered, - => return .Enum, - - .@"union", - .union_safety_tagged, - .union_tagged, - => return .Union, + }, } } @@ -517,7 +516,7 @@ pub const Type = struct { const b_struct_obj = (b.castTag(.@"struct") orelse return false).data; return a_struct_obj == b_struct_obj; }, - .tuple, .empty_struct_literal => { + .tuple => { if (!b.isSimpleTuple()) return false; const a_tuple = a.tupleFields(); @@ -741,7 +740,7 @@ pub const Type = struct { const struct_obj: *const Module.Struct = ty.castTag(.@"struct").?.data; std.hash.autoHash(hasher, struct_obj); }, - .tuple, .empty_struct_literal => { + .tuple => { std.hash.autoHash(hasher, std.builtin.TypeId.Struct); const tuple = ty.tupleFields(); @@ -837,7 +836,6 @@ pub const Type = struct { } else switch (self.legacy.ptr_otherwise.tag) { .inferred_alloc_const, .inferred_alloc_mut, - .empty_struct_literal, => unreachable, .optional, @@ -1047,7 +1045,7 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .empty_struct, .empty_struct_literal => return writer.writeAll("struct {}"), + .empty_struct => return writer.writeAll("struct {}"), .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -1266,327 +1264,328 @@ pub const Type = struct { /// Prints a name suitable for `@typeName`. 
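// Aside (editorial illustration, not part of the patch): `print` below is the
// writer behind `@typeName`; a hedged sketch of the strings it produces for
// two simple types, assuming only the standard library:
//
//     comptime {
//         const std = @import("std");
//         std.debug.assert(std.mem.eql(u8, @typeName(?u8), "?u8"));
//         std.debug.assert(std.mem.eql(u8, @typeName([2:0]u8), "[2:0]u8"));
//     }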
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| { - const sign_char: u8 = switch (int_type.signedness) { - .signed => 'i', - .unsigned => 'u', - }; - return writer.print("{c}{d}", .{ sign_char, int_type.bits }); - }, - .ptr_type => { - const info = ty.ptrInfo(mod); - - if (info.sentinel) |s| switch (info.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), - .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), - } else switch (info.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { - if (info.@"align" != 0) { - try writer.print("align({d}", .{info.@"align"}); - } else { - const alignment = info.pointee_type.abiAlignment(mod); - try writer.print("align({d}", .{alignment}); - } - - if (info.bit_offset != 0 or info.host_size != 0) { - try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); - } - if (info.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (info.vector_index != .none) { - try writer.print(":{d}", .{@enumToInt(info.vector_index)}); - } - try writer.writeAll(") "); - } - if (info.@"addrspace" != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); - } - if (!info.mutable) try writer.writeAll("const "); - if (info.@"volatile") try writer.writeAll("volatile "); - if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); - - try print(info.pointee_type, writer, mod); - return; - }, - .array_type => |array_type| { - if (array_type.sentinel == .none) { - try writer.print("[{d}]", .{array_type.len}); - try print(array_type.child.toType(), writer, mod); - } else { - try writer.print("[{d}:{}]", .{ - array_type.len, - array_type.sentinel.toValue().fmtValue(array_type.child.toType(), mod), - }); - try print(array_type.child.toType(), writer, mod); - } - return; - }, - .vector_type => |vector_type| { - try writer.print("@Vector({d}, ", .{vector_type.len}); - try print(vector_type.child.toType(), writer, mod); - try writer.writeAll(")"); - return; - }, - .opt_type => |child| { - try writer.writeByte('?'); - try print(child.toType(), writer, mod); - return; - }, - .error_union_type => |error_union_type| { - try print(error_union_type.error_set_type.toType(), writer, mod); - try writer.writeByte('!'); - try print(error_union_type.payload_type.toType(), writer, mod); - return; - }, - .simple_type => |s| return writer.writeAll(@tagName(s)), - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, - }; - const t = ty.tag(); - switch (t) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, + switch (ty.ip_index) { + .empty_struct_type => try writer.writeAll("@TypeOf(.{})"), - .empty_struct_literal => try writer.writeAll("@TypeOf(.{})"), + .none => switch (ty.tag()) { + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, - .empty_struct => { - const namespace = ty.castTag(.empty_struct).?.data; - try namespace.renderFullyQualifiedName(mod, "", writer); - }, + .empty_struct => { + const namespace = ty.castTag(.empty_struct).?.data; + try 
namespace.renderFullyQualifiedName(mod, "", writer); + }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - const decl = mod.declPtr(struct_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - const decl = mod.declPtr(union_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - const decl = mod.declPtr(enum_full.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const decl = mod.declPtr(enum_simple.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - const decl = mod.declPtr(enum_numbered.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .@"opaque" => { - const opaque_obj = ty.cast(Payload.Opaque).?.data; - const decl = mod.declPtr(opaque_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + const decl = mod.declPtr(struct_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + const decl = mod.declPtr(union_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .enum_full, .enum_nonexhaustive => { + const enum_full = ty.cast(Payload.EnumFull).?.data; + const decl = mod.declPtr(enum_full.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + const decl = mod.declPtr(enum_simple.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .enum_numbered => { + const enum_numbered = ty.castTag(.enum_numbered).?.data; + const decl = mod.declPtr(enum_numbered.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .@"opaque" => { + const opaque_obj = ty.cast(Payload.Opaque).?.data; + const decl = mod.declPtr(opaque_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + }, - .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data.func; + .error_set_inferred => { + const func = ty.castTag(.error_set_inferred).?.data.func; - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(func.owner_decl); - try owner_decl.renderFullyQualifiedName(mod, writer); - try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); - }, + try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); + const owner_decl = mod.declPtr(func.owner_decl); + try owner_decl.renderFullyQualifiedName(mod, writer); + try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + }, - .function => { - const fn_info = ty.fnInfo(); - if (fn_info.is_noinline) { - try writer.writeAll("noinline "); - } - try writer.writeAll("fn("); - for (fn_info.param_types, 0..) 
|param_ty, i| { - if (i != 0) try writer.writeAll(", "); - if (fn_info.paramIsComptime(i)) { - try writer.writeAll("comptime "); + .function => { + const fn_info = ty.fnInfo(); + if (fn_info.is_noinline) { + try writer.writeAll("noinline "); } - if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) { - try writer.writeAll("noalias "); - }; - if (param_ty.isGenericPoison()) { - try writer.writeAll("anytype"); - } else { - try print(param_ty, writer, mod); + try writer.writeAll("fn("); + for (fn_info.param_types, 0..) |param_ty, i| { + if (i != 0) try writer.writeAll(", "); + if (fn_info.paramIsComptime(i)) { + try writer.writeAll("comptime "); + } + if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) { + try writer.writeAll("noalias "); + }; + if (param_ty.isGenericPoison()) { + try writer.writeAll("anytype"); + } else { + try print(param_ty, writer, mod); + } } - } - if (fn_info.is_var_args) { - if (fn_info.param_types.len != 0) { - try writer.writeAll(", "); + if (fn_info.is_var_args) { + if (fn_info.param_types.len != 0) { + try writer.writeAll(", "); + } + try writer.writeAll("..."); } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (fn_info.alignment != 0) { - try writer.print("align({d}) ", .{fn_info.alignment}); - } - if (fn_info.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(fn_info.cc)); try writer.writeAll(") "); - } - if (fn_info.return_type.isGenericPoison()) { - try writer.writeAll("anytype"); - } else { - try print(fn_info.return_type, writer, mod); - } - }, + if (fn_info.alignment != 0) { + try writer.print("align({d}) ", .{fn_info.alignment}); + } + if (fn_info.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(fn_info.cc)); + try writer.writeAll(") "); + } + if (fn_info.return_type.isGenericPoison()) { + try writer.writeAll("anytype"); + } else { + try print(fn_info.return_type, writer, mod); + } + }, - .error_union => { - const error_union = ty.castTag(.error_union).?.data; - try print(error_union.error_set, writer, mod); - try writer.writeAll("!"); - try print(error_union.payload, writer, mod); - }, + .error_union => { + const error_union = ty.castTag(.error_union).?.data; + try print(error_union.error_set, writer, mod); + try writer.writeAll("!"); + try print(error_union.payload, writer, mod); + }, - .array => { - const payload = ty.castTag(.array).?.data; - try writer.print("[{d}]", .{payload.len}); - try print(payload.elem_type, writer, mod); - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - try writer.print("[{d}:{}]", .{ - payload.len, - payload.sentinel.fmtValue(payload.elem_type, mod), - }); - try print(payload.elem_type, writer, mod); - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; + .array => { + const payload = ty.castTag(.array).?.data; + try writer.print("[{d}]", .{payload.len}); + try print(payload.elem_type, writer, mod); + }, + .array_sentinel => { + const payload = ty.castTag(.array_sentinel).?.data; + try writer.print("[{d}:{}]", .{ + payload.len, + payload.sentinel.fmtValue(payload.elem_type, mod), + }); + try print(payload.elem_type, writer, mod); + }, + .tuple => { + const tuple = ty.castTag(.tuple).?.data; - try writer.writeAll("tuple{"); - for (tuple.types, 0..) 
|field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try print(field_ty, writer, mod); - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); + try writer.writeAll("tuple{"); + for (tuple.types, 0..) |field_ty, i| { + if (i != 0) try writer.writeAll(", "); + const val = tuple.values[i]; + if (val.ip_index != .unreachable_value) { + try writer.writeAll("comptime "); + } + try print(field_ty, writer, mod); + if (val.ip_index != .unreachable_value) { + try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); + } } - } - try writer.writeAll("}"); - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; + try writer.writeAll("}"); + }, + .anon_struct => { + const anon_struct = ty.castTag(.anon_struct).?.data; - try writer.writeAll("struct{"); - for (anon_struct.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = anon_struct.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try writer.writeAll(anon_struct.names[i]); - try writer.writeAll(": "); + try writer.writeAll("struct{"); + for (anon_struct.types, 0..) |field_ty, i| { + if (i != 0) try writer.writeAll(", "); + const val = anon_struct.values[i]; + if (val.ip_index != .unreachable_value) { + try writer.writeAll("comptime "); + } + try writer.writeAll(anon_struct.names[i]); + try writer.writeAll(": "); - try print(field_ty, writer, mod); + try print(field_ty, writer, mod); - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); + if (val.ip_index != .unreachable_value) { + try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); + } } - } - try writer.writeAll("}"); - }, + try writer.writeAll("}"); + }, - .pointer => { - const info = ty.ptrInfo(mod); + .pointer => { + const info = ty.ptrInfo(mod); - if (info.sentinel) |s| switch (info.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), - .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), - } else switch (info.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { - if (info.@"align" != 0) { - try writer.print("align({d}", .{info.@"align"}); - } else { - const alignment = info.pointee_type.abiAlignment(mod); - try writer.print("align({d}", .{alignment}); + if (info.sentinel) |s| switch (info.size) { + .One, .C => unreachable, + .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), + .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), + } else switch (info.size) { + .One => try writer.writeAll("*"), + .Many => try writer.writeAll("[*]"), + .C => try writer.writeAll("[*c]"), + .Slice => try writer.writeAll("[]"), } + if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { + if (info.@"align" != 0) { + try writer.print("align({d}", .{info.@"align"}); + } else { + const alignment = info.pointee_type.abiAlignment(mod); + try writer.print("align({d}", .{alignment}); + } - if (info.bit_offset != 0 or info.host_size != 0) { - try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); + if (info.bit_offset != 0 or info.host_size != 0) { 
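// Aside (editorial illustration, not part of the patch): the ":{d}:{d}" being
// appended in this branch is the bit-offset/host-size form of a packed-field
// pointer; the printed syntax looks like:
//
//     const P = *align(4:2:8) u3; // align 4, bit offset 2, host size 8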
+ try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); + } + if (info.vector_index == .runtime) { + try writer.writeAll(":?"); + } else if (info.vector_index != .none) { + try writer.print(":{d}", .{@enumToInt(info.vector_index)}); + } + try writer.writeAll(") "); } - if (info.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (info.vector_index != .none) { - try writer.print(":{d}", .{@enumToInt(info.vector_index)}); + if (info.@"addrspace" != .generic) { + try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); } - try writer.writeAll(") "); - } - if (info.@"addrspace" != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); - } - if (!info.mutable) try writer.writeAll("const "); - if (info.@"volatile") try writer.writeAll("volatile "); - if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); - - try print(info.pointee_type, writer, mod); - }, + if (!info.mutable) try writer.writeAll("const "); + if (info.@"volatile") try writer.writeAll("volatile "); + if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); - .optional => { - const child_type = ty.castTag(.optional).?.data; - try writer.writeByte('?'); - try print(child_type, writer, mod); - }, - .anyframe_T => { - const return_type = ty.castTag(.anyframe_T).?.data; - try writer.print("anyframe->", .{}); - try print(return_type, writer, mod); - }, - .error_set => { - const names = ty.castTag(.error_set).?.data.names.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - }, - .error_set_single => { - const name = ty.castTag(.error_set_single).?.data; - return writer.print("error{{{s}}}", .{name}); - }, - .error_set_merged => { - const names = ty.castTag(.error_set_merged).?.data.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - }, - } - } + try print(info.pointee_type, writer, mod); + }, - pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { - if (self.ip_index != .none) return self.ip_index.toValue(); - switch (self.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, + .optional => { + const child_type = ty.castTag(.optional).?.data; + try writer.writeByte('?'); + try print(child_type, writer, mod); + }, + .anyframe_T => { + const return_type = ty.castTag(.anyframe_T).?.data; + try writer.print("anyframe->", .{}); + try print(return_type, writer, mod); + }, + .error_set => { + const names = ty.castTag(.error_set).?.data.names.keys(); + try writer.writeAll("error{"); + for (names, 0..) |name, i| { + if (i != 0) try writer.writeByte(','); + try writer.writeAll(name); + } + try writer.writeAll("}"); + }, + .error_set_single => { + const name = ty.castTag(.error_set_single).?.data; + return writer.print("error{{{s}}}", .{name}); + }, + .error_set_merged => { + const names = ty.castTag(.error_set_merged).?.data.keys(); + try writer.writeAll("error{"); + for (names, 0..) 
|name, i| { + if (i != 0) try writer.writeByte(','); + try writer.writeAll(name); + } + try writer.writeAll("}"); + }, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); + }, + .ptr_type => { + const info = ty.ptrInfo(mod); + + if (info.sentinel) |s| switch (info.size) { + .One, .C => unreachable, + .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), + .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), + } else switch (info.size) { + .One => try writer.writeAll("*"), + .Many => try writer.writeAll("[*]"), + .C => try writer.writeAll("[*c]"), + .Slice => try writer.writeAll("[]"), + } + if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { + if (info.@"align" != 0) { + try writer.print("align({d}", .{info.@"align"}); + } else { + const alignment = info.pointee_type.abiAlignment(mod); + try writer.print("align({d}", .{alignment}); + } + + if (info.bit_offset != 0 or info.host_size != 0) { + try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); + } + if (info.vector_index == .runtime) { + try writer.writeAll(":?"); + } else if (info.vector_index != .none) { + try writer.print(":{d}", .{@enumToInt(info.vector_index)}); + } + try writer.writeAll(") "); + } + if (info.@"addrspace" != .generic) { + try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); + } + if (!info.mutable) try writer.writeAll("const "); + if (info.@"volatile") try writer.writeAll("volatile "); + if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); + + try print(info.pointee_type, writer, mod); + return; + }, + .array_type => |array_type| { + if (array_type.sentinel == .none) { + try writer.print("[{d}]", .{array_type.len}); + try print(array_type.child.toType(), writer, mod); + } else { + try writer.print("[{d}:{}]", .{ + array_type.len, + array_type.sentinel.toValue().fmtValue(array_type.child.toType(), mod), + }); + try print(array_type.child.toType(), writer, mod); + } + return; + }, + .vector_type => |vector_type| { + try writer.print("@Vector({d}, ", .{vector_type.len}); + try print(vector_type.child.toType(), writer, mod); + try writer.writeAll(")"); + return; + }, + .opt_type => |child| { + try writer.writeByte('?'); + try print(child.toType(), writer, mod); + return; + }, + .error_union_type => |error_union_type| { + try print(error_union_type.error_set_type.toType(), writer, mod); + try writer.writeByte('!'); + try print(error_union_type.payload_type.toType(), writer, mod); + return; + }, + .simple_type => |s| return writer.writeAll(@tagName(s)), + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, + }, + } + } + + pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { + if (self.ip_index != .none) return self.ip_index.toValue(); + switch (self.tag()) { + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, else => return Value.Tag.ty.create(allocator, self), } } @@ -1610,240 +1609,244 @@ pub const Type = struct { ignore_comptime_only: bool, strat: AbiAlignmentAdvancedStrat, ) RuntimeBitsError!bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.bits 
!= 0, - .ptr_type => |ptr_type| { + switch (ty.ip_index) { + // False because it is a comptime-only type. + .empty_struct_type => return false, + + .none => switch (ty.tag()) { + .error_set_inferred, + + .@"opaque", + .error_set_single, + .error_union, + .error_set, + .error_set_merged, + => return true, + // Pointers to zero-bit types still have a runtime address; however, pointers // to comptime-only types do not, with the exception of function pointers. - if (ignore_comptime_only) return true; - const child_ty = ptr_type.elem_type.toType(); - if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic; - if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); - return !comptimeOnly(ty, mod); - }, - .array_type => |array_type| { - if (array_type.sentinel != .none) { - return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - } else { - return array_type.len > 0 and - try array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - } - }, - .vector_type => |vector_type| { - return vector_type.len > 0 and - try vector_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - }, - .opt_type => |child| { - const child_ty = child.toType(); - if (child_ty.isNoReturn()) { - // Then the optional is comptime-known to be null. - return false; - } - if (ignore_comptime_only) { - return true; - } else if (strat == .sema) { - return !(try strat.sema.typeRequiresComptime(child_ty)); - } else { - return !comptimeOnly(child_ty, mod); - } - }, - .error_union_type => @panic("TODO"), - .simple_type => |t| return switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .anyerror, - .@"anyframe", - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - => true, + .anyframe_T, + .pointer, + => { + if (ignore_comptime_only) { + return true; + } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { + return !ty.childType(mod).fnInfo().is_generic; + } else if (strat == .sema) { + return !(try strat.sema.typeRequiresComptime(ty)); + } else { + return !comptimeOnly(ty, mod); + } + }, // These are false because they are comptime-only types. - .void, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - => false, + .empty_struct, + // These are function *bodies*, not pointers. + // Special exceptions have to be made when emitting functions due to + // this returning false. + .function, + => return false, - .generic_poison => unreachable, - .var_args_param => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; - switch (ty.tag()) { - .error_set_inferred, - - .@"opaque", - .error_set_single, - .error_union, - .error_set, - .error_set_merged, - => return true, - - // Pointers to zero-bit types still have a runtime address; however, pointers - // to comptime-only types do not, with the exception of function pointers. 
- .anyframe_T, - .pointer, - => { - if (ignore_comptime_only) { - return true; - } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { - return !ty.childType(mod).fnInfo().is_generic; - } else if (strat == .sema) { - return !(try strat.sema.typeRequiresComptime(ty)); - } else { - return !comptimeOnly(ty, mod); - } - }, + .optional => { + const child_ty = ty.optionalChild(mod); + if (child_ty.isNoReturn()) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) { + return true; + } else if (strat == .sema) { + return !(try strat.sema.typeRequiresComptime(child_ty)); + } else { + return !comptimeOnly(child_ty, mod); + } + }, - // These are false because they are comptime-only types. - .empty_struct, - .empty_struct_literal, - // These are function *bodies*, not pointers. - // Special exceptions have to be made when emitting functions due to - // this returning false. - .function, - => return false, + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + if (struct_obj.status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. + struct_obj.assumed_runtime_bits = true; + return true; + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(struct_obj.haveFieldTypes()), + .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy, + } + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, - .optional => { - const child_ty = ty.optionalChild(mod); - if (child_ty.isNoReturn()) { - // Then the optional is comptime-known to be null. - return false; - } - if (ignore_comptime_only) { - return true; - } else if (strat == .sema) { - return !(try strat.sema.typeRequiresComptime(child_ty)); - } else { - return !comptimeOnly(child_ty, mod); - } - }, + .enum_full => { + const enum_full = ty.castTag(.enum_full).?.data; + return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + }, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + return enum_simple.fields.count() >= 2; + }, + .enum_numbered, .enum_nonexhaustive => { + const int_tag_ty = try ty.intTagType(mod); + return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (struct_obj.status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - struct_obj.assumed_runtime_bits = true; - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(struct_obj.haveFieldTypes()), - .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy, - } - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + .@"union" => { + const union_obj = ty.castTag(.@"union").?.data; + if (union_obj.status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. 
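// Aside (editorial illustration, not part of the patch): the "guess true" path
// described above is reached when a type's size is queried while its own field
// types are still being resolved, as a self-referential declaration can do:
//
//     const Node = union {
//         leaf: u32,
//         more: *Node, // resolving Node's fields may ask about Node again
//     };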
+ union_obj.assumed_runtime_bits = true; return true; - } else { - return false; - } - }, + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(union_obj.haveFieldTypes()), + .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, + } + for (union_obj.fields.values()) |value| { + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) { + return true; + } - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.fields.count() >= 2; - }, - .enum_numbered, .enum_nonexhaustive => { - const int_tag_ty = try ty.intTagType(mod); - return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - }, + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(union_obj.haveFieldTypes()), + .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, + } + for (union_obj.fields.values()) |value| { + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - if (union_obj.status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - union_obj.assumed_runtime_bits = true; - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(union_obj.haveFieldTypes()), - .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, - } - for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) { - return true; - } + .array => return ty.arrayLen(mod) != 0 and + try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(union_obj.haveFieldTypes()), - .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, - } - for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); + for (tuple.types, 0..) 
|field_ty, i| { + const val = tuple.values[i]; + if (val.ip_index != .unreachable_value) continue; // comptime field + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; + } return false; - } - }, - - .array => return ty.arrayLen(mod) != 0 and - try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) continue; // comptime field - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; - } - return false; + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits != 0, + .ptr_type => |ptr_type| { + // Pointers to zero-bit types still have a runtime address; however, pointers + // to comptime-only types do not, with the exception of function pointers. + if (ignore_comptime_only) return true; + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic; + if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); + return !comptimeOnly(ty, mod); + }, + .array_type => |array_type| { + if (array_type.sentinel != .none) { + return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } else { + return array_type.len > 0 and + try array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } + }, + .vector_type => |vector_type| { + return vector_type.len > 0 and + try vector_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + }, + .opt_type => |child| { + const child_ty = child.toType(); + if (child_ty.isNoReturn()) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) { + return true; + } else if (strat == .sema) { + return !(try strat.sema.typeRequiresComptime(child_ty)); + } else { + return !comptimeOnly(child_ty, mod); + } + }, + .error_union_type => @panic("TODO"), + .simple_type => |t| return switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .anyerror, + .@"anyframe", + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => true, + + // These are false because they are comptime-only types. 
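// Aside (editorial illustration, not part of the patch): "comptime-only" for
// the simple types listed below is user-visible as a zero runtime size; a
// minimal sketch, assuming only the standard library:
//
//     comptime {
//         const std = @import("std");
//         std.debug.assert(@sizeOf(void) == 0);
//         std.debug.assert(@sizeOf(comptime_int) == 0);
//         std.debug.assert(@sizeOf(@TypeOf(null)) == 0);
//     }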
+ .void, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + => false, + + .generic_poison => unreachable, + .var_args_param => unreachable, + }, + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, } } @@ -1851,104 +1854,107 @@ pub const Type = struct { /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => true, - .ptr_type => true, - .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), - .vector_type => true, - .opt_type => |child| child.toType().isPtrLikeOptional(mod), - .error_union_type => false, - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .void, + return switch (ty.ip_index) { + .empty_struct_type => false, + + .none => switch (ty.tag()) { + .pointer, + .enum_numbered, => true, - .anyerror, - .@"anyframe", - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - .generic_poison, + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + .@"opaque", + // These are function bodies, not function pointers. + .function, + .enum_simple, + .error_union, + .anyframe_T, + .tuple, + .anon_struct, + .empty_struct, => false, - .var_args_param => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; - return switch (ty.tag()) { - .pointer, - .enum_numbered, - => true, + .enum_full, + .enum_nonexhaustive, + => !ty.cast(Payload.EnumFull).?.data.tag_ty_inferred, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - // These are function bodies, not function pointers. 
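// Aside (editorial illustration, not part of the patch): the body/pointer
// distinction noted above, in user-level terms:
//
//     const Body = fn () void;       // function *body* type; comptime-only, no layout
//     const Ptr = *const fn () void; // function *pointer*; pointer-sized, well-defined layout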
- .function, - .enum_simple, - .error_union, - .anyframe_T, - .tuple, - .anon_struct, - .empty_struct_literal, - .empty_struct, - => false, + .inferred_alloc_mut => unreachable, + .inferred_alloc_const => unreachable, - .enum_full, - .enum_nonexhaustive, - => !ty.cast(Payload.EnumFull).?.data.tag_ty_inferred, + .array, + .array_sentinel, + => ty.childType(mod).hasWellDefinedLayout(mod), - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, + .optional => ty.isPtrLikeOptional(mod), + .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, + .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, + .union_tagged => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => true, + .ptr_type => true, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), + .vector_type => true, + .opt_type => |child| child.toType().isPtrLikeOptional(mod), + .error_union_type => false, + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .void, + => true, - .array, - .array_sentinel, - => ty.childType(mod).hasWellDefinedLayout(mod), + .anyerror, + .@"anyframe", + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + .generic_poison, + => false, - .optional => ty.isPtrLikeOptional(mod), - .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, - .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, - .union_tagged => false, + .var_args_param => unreachable, + }, + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }, }; } @@ -2120,232 +2126,232 @@ pub const Type = struct { else => null, }; - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| { - if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; - return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; - }, - .ptr_type => { - return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; - }, - .array_type => |array_type| { - return array_type.child.toType().abiAlignmentAdvanced(mod, strat); - }, - .vector_type => |vector_type| { - const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema); - const bits = @intCast(u32, bits_u64); - const bytes = ((bits * vector_type.len) + 7) / 8; - const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); - return AbiAlignmentAdvanced{ .scalar = alignment }; - }, + switch (ty.ip_index) { + .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, + .none => switch (ty.tag()) { + .@"opaque" => return AbiAlignmentAdvanced{ .scalar = 1 }, - .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), - .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), - .simple_type => |t| switch (t) { - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - 
.anyopaque, - => return AbiAlignmentAdvanced{ .scalar = 1 }, + // represents machine code; not a pointer + .function => { + const alignment = ty.castTag(.function).?.data.alignment; + if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; + return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; + }, - .usize, - .isize, - .export_options, - .extern_options, - .@"anyframe", + .pointer, + .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, - .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, - .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, - .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, - .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, - .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, - .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, - .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, - .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, - .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - - .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, - .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) }, - .f64 => switch (target.c_type_bit_size(.double)) { - 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) }, - else => return AbiAlignmentAdvanced{ .scalar = 8 }, - }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - else => { - const u80_ty: Type = .{ - .ip_index = .u80_type, - .legacy = undefined, - }; - return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; - }, - }, - .f128 => switch (target.c_type_bit_size(.longdouble)) { - 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - else => return AbiAlignmentAdvanced{ .scalar = 16 }, - }, - // TODO revisit this when we have the concept of the error tag type - .anyerror => return AbiAlignmentAdvanced{ .scalar = 2 }, - - .void, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - .type_info, - => return AbiAlignmentAdvanced{ .scalar = 0 }, - - .noreturn => unreachable, - .generic_poison => unreachable, - .var_args_param => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; - - switch (ty.tag()) { - .@"opaque" => return AbiAlignmentAdvanced{ .scalar = 1 }, - - // represents machine code; not a pointer - .function => { - const alignment = ty.castTag(.function).?.data.alignment; - if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; - return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; - }, - - .pointer, - .anyframe_T, - => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + .error_set_inferred, + .error_set_single, + .error_set, + .error_set_merged, + => return AbiAlignmentAdvanced{ .scalar = 2 }, - // TODO revisit this when we have the concept of 
the error tag type - .error_set_inferred, - .error_set_single, - .error_set, - .error_set_merged, - => return AbiAlignmentAdvanced{ .scalar = 2 }, - - .array, .array_sentinel => return ty.childType(mod).abiAlignmentAdvanced(mod, strat), + .array, .array_sentinel => return ty.childType(mod).abiAlignmentAdvanced(mod, strat), - .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), - .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), + .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), + .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (opt_sema) |sema| { - if (struct_obj.status == .field_types_wip) { - // We'll guess "pointer-aligned", if the struct has an - // underaligned pointer field then some allocations - // might require explicit alignment. - return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + if (opt_sema) |sema| { + if (struct_obj.status == .field_types_wip) { + // We'll guess "pointer-aligned", if the struct has an + // underaligned pointer field then some allocations + // might require explicit alignment. + return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + } + _ = try sema.resolveTypeFields(ty); } - _ = try sema.resolveTypeFields(ty); - } - if (!struct_obj.haveFieldTypes()) switch (strat) { - .eager => unreachable, // struct layout not resolved - .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }; - if (struct_obj.layout == .Packed) { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - } - }, - .eager => {}, + if (!struct_obj.haveFieldTypes()) switch (strat) { + .eager => unreachable, // struct layout not resolved + .sema => unreachable, // handled above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }; + if (struct_obj.layout == .Packed) { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + if (!struct_obj.haveLayout()) { + return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; + } + }, + .eager => {}, + } + assert(struct_obj.haveLayout()); + return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; } - assert(struct_obj.haveLayout()); - return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; - } - - const fields = ty.structFields(); - var big_align: u32 = 0; - for (fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, - else => |e| return e, - })) continue; - const field_align = if (field.abi_align != 0) - field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |a| a, - .val => switch (strat) { - .eager => unreachable, // struct layout not resolved - .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, - }; - big_align = @max(big_align, field_align); + const fields = ty.structFields(); + var big_align: 
u32 = 0; + for (fields.values()) |field| { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + else => |e| return e, + })) continue; + + const field_align = if (field.abi_align != 0) + field.abi_align + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |a| a, + .val => switch (strat) { + .eager => unreachable, // struct layout not resolved + .sema => unreachable, // handled above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }, + }; + big_align = @max(big_align, field_align); + + // This logic is duplicated in Module.Struct.Field.alignment. + if (struct_obj.layout == .Extern or target.ofmt == .c) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { + // The C ABI requires 128 bit integer fields of structs + // to be 16-bytes aligned. + big_align = @max(big_align, 16); + } + } + } + return AbiAlignmentAdvanced{ .scalar = big_align }; + }, - // This logic is duplicated in Module.Struct.Field.alignment. - if (struct_obj.layout == .Extern or target.ofmt == .c) { - if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { - // The C ABI requires 128 bit integer fields of structs - // to be 16-bytes aligned. - big_align = @max(big_align, 16); + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); + var big_align: u32 = 0; + for (tuple.types, 0..) |field_ty, i| { + const val = tuple.values[i]; + if (val.ip_index != .unreachable_value) continue; // comptime field + if (!(field_ty.hasRuntimeBits(mod))) continue; + + switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |field_align| big_align = @max(big_align, field_align), + .val => switch (strat) { + .eager => unreachable, // field type alignment not resolved + .sema => unreachable, // passed to abiAlignmentAdvanced above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }, } } - } - return AbiAlignmentAdvanced{ .scalar = big_align }; - }, + return AbiAlignmentAdvanced{ .scalar = big_align }; + }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - var big_align: u32 = 0; - for (tuple.types, 0..) 
|field_ty, i| { - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) continue; // comptime field - if (!(field_ty.hasRuntimeBits(mod))) continue; + .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { + const int_tag_ty = try ty.intTagType(mod); + return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; + }, + .@"union" => { + const union_obj = ty.castTag(.@"union").?.data; + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, false); + }, + .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true); + }, + + .empty_struct => return AbiAlignmentAdvanced{ .scalar = 0 }, - switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |field_align| big_align = @max(big_align, field_align), - .val => switch (strat) { - .eager => unreachable, // field type alignment not resolved - .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, - } - } - return AbiAlignmentAdvanced{ .scalar = big_align }; + .inferred_alloc_const, + .inferred_alloc_mut, + => unreachable, }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; + return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; + }, + .ptr_type => { + return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + }, + .array_type => |array_type| { + return array_type.child.toType().abiAlignmentAdvanced(mod, strat); + }, + .vector_type => |vector_type| { + const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema); + const bits = @intCast(u32, bits_u64); + const bytes = ((bits * vector_type.len) + 7) / 8; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return AbiAlignmentAdvanced{ .scalar = alignment }; + }, - .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; - }, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, false); - }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true); - }, + .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), + .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .anyopaque, + => return AbiAlignmentAdvanced{ .scalar = 1 }, - .empty_struct, - .empty_struct_literal, - => return AbiAlignmentAdvanced{ .scalar = 0 }, + .usize, + .isize, + .export_options, + .extern_options, + .@"anyframe", + => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, + .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, + .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, + .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, + .c_uint => return 
AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, + .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, + .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, + .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, + .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, + .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + + .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, + .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) }, + .f64 => switch (target.c_type_bit_size(.double)) { + 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) }, + else => return AbiAlignmentAdvanced{ .scalar = 8 }, + }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + else => { + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, + }; + return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; + }, + }, + .f128 => switch (target.c_type_bit_size(.longdouble)) { + 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + else => return AbiAlignmentAdvanced{ .scalar = 16 }, + }, - .inferred_alloc_const, - .inferred_alloc_mut, - => unreachable, + // TODO revisit this when we have the concept of the error tag type + .anyerror => return AbiAlignmentAdvanced{ .scalar = 2 }, + + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => return AbiAlignmentAdvanced{ .scalar = 0 }, + + .noreturn => unreachable, + .generic_poison => unreachable, + .var_args_param => unreachable, + }, + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }, } } @@ -2506,255 +2512,256 @@ pub const Type = struct { ) Module.CompileError!AbiSizeAdvanced { const target = mod.getTarget(); - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| { - if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; - }, - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - }, - .array_type => |array_type| { - const len = array_type.len + @boolToInt(array_type.sentinel != .none); - switch (try array_type.child.toType().abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| return .{ .scalar = len * elem_size }, - .val => switch (strat) { - .sema, .eager => unreachable, - .lazy => |arena| return .{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - } - }, - .vector_type => |vector_type| { - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, - .lazy => |arena| return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(arena, ty), - }, - }; - const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); - const elem_bits = @intCast(u32, elem_bits_u64); - const total_bits = elem_bits * vector_type.len; - const total_bytes = (total_bits + 7) / 8; - const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { - .scalar 
=> |x| x, - .val => return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(strat.lazy, ty), - }, - }; - const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); - return AbiSizeAdvanced{ .scalar = result }; - }, + switch (ty.ip_index) { + .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, - .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), - .error_union_type => @panic("TODO"), - .simple_type => |t| switch (t) { - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - => return AbiSizeAdvanced{ .scalar = 1 }, - - .f16 => return AbiSizeAdvanced{ .scalar = 2 }, - .f32 => return AbiSizeAdvanced{ .scalar = 4 }, - .f64 => return AbiSizeAdvanced{ .scalar = 8 }, - .f128 => return AbiSizeAdvanced{ .scalar = 16 }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + .none => switch (ty.tag()) { + .function => unreachable, // represents machine code; not a pointer + .@"opaque" => unreachable, // no size available + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, + + .empty_struct => return AbiSizeAdvanced{ .scalar = 0 }, + + .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { + .Packed => { + const struct_obj = ty.castTag(.@"struct").?.data; + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + if (!struct_obj.haveLayout()) { + return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; + } + }, + .eager => {}, + } + assert(struct_obj.haveLayout()); + return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; + }, else => { - const u80_ty: Type = .{ - .ip_index = .u80_type, - .legacy = undefined, - }; - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + if (ty.castTag(.@"struct")) |payload| { + const struct_obj = payload.data; + if (!struct_obj.haveLayout()) { + return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; + } + } + }, + .eager => {}, + } + const field_count = ty.structFieldCount(); + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, }, - .usize, - .isize, - .@"anyframe", - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - - .anyopaque, - .void, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - => return AbiSizeAdvanced{ .scalar = 0 }, - - // TODO revisit this 
when we have the concept of the error tag type - .anyerror => return AbiSizeAdvanced{ .scalar = 2 }, - - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - - .type_info => unreachable, - .noreturn => unreachable, - .generic_poison => unreachable, - .var_args_param => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; - - switch (ty.tag()) { - .function => unreachable, // represents machine code; not a pointer - .@"opaque" => unreachable, // no size available - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - .empty_struct_literal, - .empty_struct, - => return AbiSizeAdvanced{ .scalar = 0 }, + .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { + const int_tag_ty = try ty.intTagType(mod); + return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; + }, + .@"union" => { + const union_obj = ty.castTag(.@"union").?.data; + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, false); + }, + .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); + }, - .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { - .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } + .array => { + const payload = ty.castTag(.array).?.data; + switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size }, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, }, - .eager => {}, } - assert(struct_obj.haveLayout()); - return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; }, - else => { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - } + .array_sentinel => { + const payload = ty.castTag(.array_sentinel).?.data; + switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size }, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, }, - .eager => {}, } - const field_count = ty.structFieldCount(); - if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, - }, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; - }, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - return abiSizeAdvancedUnion(ty, mod, strat, union_obj, false); 
- }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); - }, + .anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .array => { - const payload = ty.castTag(.array).?.data; - switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size }, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - } - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size }, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - } - }, + .pointer => switch (ty.castTag(.pointer).?.data.size) { + .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + }, - .anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + // TODO revisit this when we have the concept of the error tag type + .error_set_inferred, + .error_set, + .error_set_merged, + .error_set_single, + => return AbiSizeAdvanced{ .scalar = 2 }, - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - }, + .optional => return ty.abiSizeAdvancedOptional(mod, strat), - // TODO revisit this when we have the concept of the error tag type - .error_set_inferred, - .error_set, - .error_set_merged, - .error_set_single, - => return AbiSizeAdvanced{ .scalar = 2 }, + .error_union => { + // This code needs to be kept in sync with the equivalent switch prong + // in abiAlignmentAdvanced. + const data = ty.castTag(.error_union).?.data; + const code_size = abiSize(Type.anyerror, mod); + if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + else => |e| return e, + })) { + // Same as anyerror. 
+ return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(data.payload, mod); + const payload_size = switch (try data.payload.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + }, + }; - .optional => return ty.abiSizeAdvancedOptional(mod, strat), + var size: u64 = 0; + if (code_align > payload_align) { + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + } else { + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + } + return AbiSizeAdvanced{ .scalar = size }; + }, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; + }, + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + }, + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + switch (try array_type.child.toType().abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| return .{ .scalar = len * elem_size }, + .val => switch (strat) { + .sema, .eager => unreachable, + .lazy => |arena| return .{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + }, + } + }, + .vector_type => |vector_type| { + const opt_sema = switch (strat) { + .sema => |sema| sema, + .eager => null, + .lazy => |arena| return AbiSizeAdvanced{ + .val = try Value.Tag.lazy_size.create(arena, ty), + }, + }; + const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); + const elem_bits = @intCast(u32, elem_bits_u64); + const total_bits = elem_bits * vector_type.len; + const total_bytes = (total_bits + 7) / 8; + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| x, + .val => return AbiSizeAdvanced{ + .val = try Value.Tag.lazy_size.create(strat.lazy, ty), + }, + }; + const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + return AbiSizeAdvanced{ .scalar = result }; + }, - .error_union => { - // This code needs to be kept in sync with the equivalent switch prong - // in abiAlignmentAdvanced. - const data = ty.castTag(.error_union).?.data; - const code_size = abiSize(Type.anyerror, mod); - if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, - else => |e| return e, - })) { - // Same as anyerror. 
- return AbiSizeAdvanced{ .scalar = code_size }; - } - const code_align = abiAlignment(Type.anyerror, mod); - const payload_align = abiAlignment(data.payload, mod); - const payload_size = switch (try data.payload.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), + .error_union_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + => return AbiSizeAdvanced{ .scalar = 1 }, + + .f16 => return AbiSizeAdvanced{ .scalar = 2 }, + .f32 => return AbiSizeAdvanced{ .scalar = 4 }, + .f64 => return AbiSizeAdvanced{ .scalar = 8 }, + .f128 => return AbiSizeAdvanced{ .scalar = 16 }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + else => { + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, + }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; + }, }, - }; - var size: u64 = 0; - if (code_align > payload_align) { - size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); - } else { - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); - size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); - } - return AbiSizeAdvanced{ .scalar = size }; + .usize, + .isize, + .@"anyframe", + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + + .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, + .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, + .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, + .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, + .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, + .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, + .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, + .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, + .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, + .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + + .anyopaque, + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return AbiSizeAdvanced{ .scalar = 0 }, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return AbiSizeAdvanced{ .scalar = 2 }, + + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + + .type_info => unreachable, + .noreturn => unreachable, + .generic_poison => unreachable, + .var_args_param => unreachable, + }, + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type }, } } @@ -2929,7 +2936,6 @@ pub 
const Type = struct { switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer .empty_struct => unreachable, - .empty_struct_literal => unreachable, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, .@"opaque" => unreachable, @@ -3490,12 +3496,16 @@ pub const Type = struct { } pub fn containerLayout(ty: Type) std.builtin.Type.ContainerLayout { - return switch (ty.tag()) { - .tuple, .empty_struct_literal, .anon_struct => .Auto, - .@"struct" => ty.castTag(.@"struct").?.data.layout, - .@"union" => ty.castTag(.@"union").?.data.layout, - .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.layout, - .union_tagged => ty.castTag(.union_tagged).?.data.layout, + return switch (ty.ip_index) { + .empty_struct_type => .Auto, + .none => switch (ty.tag()) { + .tuple, .anon_struct => .Auto, + .@"struct" => ty.castTag(.@"struct").?.data.layout, + .@"union" => ty.castTag(.@"union").?.data.layout, + .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.layout, + .union_tagged => ty.castTag(.union_tagged).?.data.layout, + else => unreachable, + }, else => unreachable, }; } @@ -3610,13 +3620,14 @@ pub const Type = struct { pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { return switch (ty.ip_index) { + .empty_struct_type => 0, .none => switch (ty.tag()) { .array => ty.castTag(.array).?.data.len, .array_sentinel => ty.castTag(.array_sentinel).?.data.len, .tuple => ty.castTag(.tuple).?.data.types.len, .anon_struct => ty.castTag(.anon_struct).?.data.types.len, .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), - .empty_struct, .empty_struct_literal => 0, + .empty_struct => 0, else => unreachable, }, @@ -3649,10 +3660,10 @@ pub const Type = struct { /// Asserts the type is an array, pointer or vector. 
pub fn sentinel(ty: Type, mod: *const Module) ?Value { return switch (ty.ip_index) { + .empty_struct_type => null, .none => switch (ty.tag()) { .array, .tuple, - .empty_struct_literal, .@"struct", => null, @@ -3951,197 +3962,200 @@ pub const Type = struct { pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { var ty = starting_type; - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| { - if (int_type.bits == 0) { - return try mod.intValue(ty, 0); - } else { - return null; - } - }, - .ptr_type => return null, - .array_type => |array_type| { - if (array_type.len == 0) - return Value.initTag(.empty_array); - if ((try array_type.child.toType().onePossibleValue(mod)) != null) - return Value.initTag(.the_only_possible_value); - return null; - }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return Value.initTag(.empty_array); - if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v; - return null; - }, - .opt_type => |child| { - if (child.toType().isNoReturn()) { - return Value.null; - } else { - return null; - } - }, - .error_union_type => return null, - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .type, - .anyerror, - .comptime_int, - .comptime_float, - .@"anyframe", - .enum_literal, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, + while (true) switch (ty.ip_index) { + .empty_struct_type => return Value.empty_struct, + + .none => switch (ty.tag()) { + .error_union, + .error_set_single, + .error_set, + .error_set_merged, + .function, + .array_sentinel, + .error_set_inferred, + .@"opaque", + .anyframe_T, + .pointer, => return null, - .void => return Value.void, - .noreturn => return Value.@"unreachable", - .null => return Value.null, - .undefined => return Value.undef, + .optional => { + const child_ty = ty.optionalChild(mod); + if (child_ty.isNoReturn()) { + return Value.null; + } else { + return null; + } + }, - .generic_poison => unreachable, - .var_args_param => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; + .@"struct" => { + const s = ty.castTag(.@"struct").?.data; + assert(s.haveFieldTypes()); + for (s.fields.values()) |field| { + if (field.is_comptime) continue; + if ((try field.ty.onePossibleValue(mod)) != null) continue; + return null; + } + return Value.empty_struct; + }, - while (true) switch (ty.tag()) { - .error_union, - .error_set_single, - .error_set, - .error_set_merged, - .function, - .array_sentinel, - .error_set_inferred, - .@"opaque", - .anyframe_T, - .pointer, - => return null, + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); + for (tuple.values, 0..) |val, i| { + const is_comptime = val.ip_index != .unreachable_value; + if (is_comptime) continue; + if ((try tuple.types[i].onePossibleValue(mod)) != null) continue; + return null; + } + return Value.empty_struct; + }, + + .enum_numbered => { + const enum_numbered = ty.castTag(.enum_numbered).?.data; + // An explicit tag type is always provided for enum_numbered. 
+ if (enum_numbered.tag_ty.hasRuntimeBits(mod)) { + return null; + } + assert(enum_numbered.fields.count() == 1); + return enum_numbered.values.keys()[0]; + }, + .enum_full => { + const enum_full = ty.castTag(.enum_full).?.data; + if (enum_full.tag_ty.hasRuntimeBits(mod)) { + return null; + } + switch (enum_full.fields.count()) { + 0 => return Value.@"unreachable", + 1 => if (enum_full.values.count() == 0) { + return try mod.intValue(ty, 0); // auto-numbered + } else { + return enum_full.values.keys()[0]; + }, + else => return null, + } + }, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + switch (enum_simple.fields.count()) { + 0 => return Value.@"unreachable", + 1 => return try mod.intValue(ty, 0), + else => return null, + } + }, + .enum_nonexhaustive => { + const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; + if (!tag_ty.hasRuntimeBits(mod)) { + return try mod.intValue(ty, 0); + } else { + return null; + } + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; + if (union_obj.fields.count() == 0) return Value.@"unreachable"; + const only_field = union_obj.fields.values()[0]; + const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; + _ = tag_val; + _ = val_val; + return Value.empty_struct; + }, - .optional => { - const child_ty = ty.optionalChild(mod); - if (child_ty.isNoReturn()) { - return Value.null; - } else { - return null; - } - }, + .empty_struct => return Value.empty_struct, - .@"struct" => { - const s = ty.castTag(.@"struct").?.data; - assert(s.haveFieldTypes()); - for (s.fields.values()) |field| { - if (field.is_comptime) continue; - if ((try field.ty.onePossibleValue(mod)) != null) continue; + .array => { + if (ty.arrayLen(mod) == 0) + return Value.initTag(.empty_array); + if ((try ty.childType(mod).onePossibleValue(mod)) != null) + return Value.initTag(.the_only_possible_value); return null; - } - return Value.initTag(.empty_struct_value); - }, + }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) |val, i| { - const is_comptime = val.ip_index != .unreachable_value; - if (is_comptime) continue; - if ((try tuple.types[i].onePossibleValue(mod)) != null) continue; - return null; - } - return Value.initTag(.empty_struct_value); + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, - - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. 
- if (enum_numbered.tag_ty.hasRuntimeBits(mod)) { + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return try mod.intValue(ty, 0); + } else { + return null; + } + }, + .ptr_type => return null, + .array_type => |array_type| { + if (array_type.len == 0) + return Value.initTag(.empty_array); + if ((try array_type.child.toType().onePossibleValue(mod)) != null) + return Value.initTag(.the_only_possible_value); return null; - } - assert(enum_numbered.fields.count() == 1); - return enum_numbered.values.keys()[0]; - }, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - if (enum_full.tag_ty.hasRuntimeBits(mod)) { + }, + .vector_type => |vector_type| { + if (vector_type.len == 0) return Value.initTag(.empty_array); + if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v; return null; - } - switch (enum_full.fields.count()) { - 0 => return Value.@"unreachable", - 1 => if (enum_full.values.count() == 0) { - return try mod.intValue(ty, 0); // auto-numbered + }, + .opt_type => |child| { + if (child.toType().isNoReturn()) { + return Value.null; } else { - return enum_full.values.keys()[0]; - }, - else => return null, - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.@"unreachable", - 1 => return try mod.intValue(ty, 0), - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasRuntimeBits(mod)) { - return try mod.intValue(ty, 0); - } else { - return null; - } - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; - if (union_obj.fields.count() == 0) return Value.@"unreachable"; - const only_field = union_obj.fields.values()[0]; - const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; - _ = tag_val; - _ = val_val; - return Value.initTag(.empty_struct_value); - }, + return null; + } + }, + .error_union_type => return null, + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .@"anyframe", + .enum_literal, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + => return null, - .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), + .void => return Value.void, + .noreturn => return Value.@"unreachable", + .null => return Value.null, + .undefined => return Value.undef, - .array => { - if (ty.arrayLen(mod) == 0) - return Value.initTag(.empty_array); - if ((try ty.childType(mod).onePossibleValue(mod)) != null) - return Value.initTag(.the_only_possible_value); - return null; + .generic_poison => unreachable, + .var_args_param => unreachable, + }, + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }; } @@ -4150,159 
+4164,161 @@ pub const Type = struct { /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. pub fn comptimeOnly(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => false, - .ptr_type => |ptr_type| { - const child_ty = ptr_type.elem_type.toType(); - if (child_ty.zigTypeTag(mod) == .Fn) { - return false; - } else { - return child_ty.comptimeOnly(mod); - } - }, - .array_type => |array_type| array_type.child.toType().comptimeOnly(mod), - .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), - .opt_type => |child| child.toType().comptimeOnly(mod), - .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod), - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .anyopaque, - .bool, - .void, - .anyerror, - .@"anyframe", - .noreturn, - .generic_poison, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - => false, + return switch (ty.ip_index) { + .empty_struct_type => false, - .type, - .comptime_int, - .comptime_float, - .null, - .undefined, - .enum_literal, - .type_info, - => true, + .none => switch (ty.tag()) { + .empty_struct, + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + .@"opaque", + .enum_simple, + => false, - .var_args_param => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; + // These are function bodies, not function pointers. + .function => true, - return switch (ty.tag()) { - .empty_struct_literal, - .empty_struct, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .enum_simple, - => false, + .inferred_alloc_mut => unreachable, + .inferred_alloc_const => unreachable, - // These are function bodies, not function pointers. - .function => true, + .array, + .array_sentinel, + => return ty.childType(mod).comptimeOnly(mod), - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, + .pointer => { + const child_ty = ty.childType(mod); + if (child_ty.zigTypeTag(mod) == .Fn) { + return false; + } else { + return child_ty.comptimeOnly(mod); + } + }, - .array, - .array_sentinel, - => return ty.childType(mod).comptimeOnly(mod), + .optional => { + return ty.optionalChild(mod).comptimeOnly(mod); + }, - .pointer => { - const child_ty = ty.childType(mod); - if (child_ty.zigTypeTag(mod) == .Fn) { + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); + for (tuple.types, 0..) |field_ty, i| { + const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; + if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; + } return false; - } else { - return child_ty.comptimeOnly(mod); - } - }, + }, - .optional => { - return ty.optionalChild(mod).comptimeOnly(mod); - }, + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + switch (struct_obj.requires_comptime) { + .wip, .unknown => { + // Return false to avoid incorrect dependency loops. 
+ // This will be handled correctly once merged with + // `Sema.typeRequiresComptime`. + return false; + }, + .no => return false, + .yes => return true, + } + }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; - } - return false; - }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + switch (union_obj.requires_comptime) { + .wip, .unknown => { + // Return false to avoid incorrect dependency loops. + // This will be handled correctly once merged with + // `Sema.typeRequiresComptime`. + return false; + }, + .no => return false, + .yes => return true, + } + }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .wip, .unknown => { - // Return false to avoid incorrect dependency loops. - // This will be handled correctly once merged with - // `Sema.typeRequiresComptime`. - return false; - }, - .no => return false, - .yes => return true, - } + .error_union => return ty.errorUnionPayload().comptimeOnly(mod), + .anyframe_T => { + const child_ty = ty.castTag(.anyframe_T).?.data; + return child_ty.comptimeOnly(mod); + }, + .enum_numbered => { + const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; + return tag_ty.comptimeOnly(mod); + }, + .enum_full, .enum_nonexhaustive => { + const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; + return tag_ty.comptimeOnly(mod); + }, }, - - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .wip, .unknown => { - // Return false to avoid incorrect dependency loops. - // This will be handled correctly once merged with - // `Sema.typeRequiresComptime`. 
+ else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => false, + .ptr_type => |ptr_type| { + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { return false; - }, - .no => return false, - .yes => return true, - } - }, + } else { + return child_ty.comptimeOnly(mod); + } + }, + .array_type => |array_type| array_type.child.toType().comptimeOnly(mod), + .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), + .opt_type => |child| child.toType().comptimeOnly(mod), + .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod), + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .@"anyframe", + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return child_ty.comptimeOnly(mod); - }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return tag_ty.comptimeOnly(mod); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return tag_ty.comptimeOnly(mod); + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + + .var_args_param => unreachable, + }, + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type }, }; } @@ -4575,15 +4591,19 @@ pub const Type = struct { } pub fn structFields(ty: Type) Module.Struct.Fields { - switch (ty.tag()) { - .empty_struct, .empty_struct_literal => return .{}, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields; + return switch (ty.ip_index) { + .empty_struct_type => .{}, + .none => switch (ty.tag()) { + .empty_struct => .{}, + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields; + }, + else => unreachable, }, else => unreachable, - } + }; } pub fn structFieldName(ty: Type, field_index: usize) []const u8 { @@ -4599,17 +4619,21 @@ pub const Type = struct { } pub fn structFieldCount(ty: Type) usize { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields.count(); + return switch (ty.ip_index) { + .empty_struct_type => 0, + .none => switch (ty.tag()) { + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields.count(); + }, + .empty_struct => 0, + .tuple => ty.castTag(.tuple).?.data.types.len, + .anon_struct => ty.castTag(.anon_struct).?.data.types.len, + else => unreachable, }, - .empty_struct, .empty_struct_literal => return 0, - .tuple => return ty.castTag(.tuple).?.data.types.len, - .anon_struct => return ty.castTag(.anon_struct).?.data.types.len, else => unreachable, - } + }; } /// Supports structs and 
unions. @@ -4927,10 +4951,6 @@ pub const Type = struct { return ty.ip_index == .generic_poison_type; } - pub fn isVarArgsParam(ty: Type) bool { - return ty.ip_index == .none and ty.tag() == .var_args_param; - } - /// This enum does not directly correspond to `std.builtin.TypeId` because /// it has extra enum tags in it, as a way of using less memory. For example, /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types @@ -4938,8 +4958,6 @@ pub const Type = struct { /// with different enum tags, because the the former requires more payload data than the latter. /// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`. pub const Tag = enum(usize) { - /// Same as `empty_struct` except it has an empty namespace. - empty_struct_literal, /// This is a special value that tracks a set of types that have been stored /// to an inferred allocation. It does not support most of the normal type queries. /// However it does respond to `isConstPtr`, `ptrSize`, `zigTypeTag`, etc. @@ -4982,7 +5000,6 @@ pub const Type = struct { return switch (t) { .inferred_alloc_const, .inferred_alloc_mut, - .empty_struct_literal, => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), .optional, @@ -5038,33 +5055,36 @@ pub const Type = struct { pub fn isTuple(ty: Type) bool { return switch (ty.ip_index) { + .empty_struct_type => true, .none => switch (ty.tag()) { - .tuple, .empty_struct_literal => true, + .tuple => true, .@"struct" => ty.castTag(.@"struct").?.data.is_tuple, else => false, }, - else => false, // TODO + else => false, // TODO struct }; } pub fn isAnonStruct(ty: Type) bool { return switch (ty.ip_index) { + .empty_struct_type => true, .none => switch (ty.tag()) { - .anon_struct, .empty_struct_literal => true, + .anon_struct => true, else => false, }, - else => false, // TODO + else => false, // TODO struct }; } pub fn isTupleOrAnonStruct(ty: Type) bool { return switch (ty.ip_index) { + .empty_struct_type => true, .none => switch (ty.tag()) { - .tuple, .empty_struct_literal, .anon_struct => true, + .tuple, .anon_struct => true, .@"struct" => ty.castTag(.@"struct").?.data.is_tuple, else => false, }, - else => false, // TODO + else => false, // TODO struct }; } @@ -5072,7 +5092,7 @@ pub const Type = struct { return switch (ty.ip_index) { .empty_struct => true, .none => switch (ty.tag()) { - .tuple, .empty_struct_literal => true, + .tuple => true, else => false, }, else => false, // TODO @@ -5083,7 +5103,7 @@ pub const Type = struct { return switch (ty.ip_index) { .empty_struct => true, .none => switch (ty.tag()) { - .tuple, .empty_struct_literal, .anon_struct => true, + .tuple, .anon_struct => true, else => false, }, else => false, @@ -5100,7 +5120,6 @@ pub const Type = struct { .types = ty.castTag(.anon_struct).?.data.types, .values = ty.castTag(.anon_struct).?.data.values, }, - .empty_struct_literal => .{ .types = &.{}, .values = &.{} }, else => unreachable, }, else => unreachable, @@ -5387,6 +5406,7 @@ pub const Type = struct { .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined, }; + pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type, .legacy = undefined }; pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined }; diff --git a/src/value.zig b/src/value.zig index d771f53a3ed9..537267754358 100644 --- a/src/value.zig +++ b/src/value.zig @@ -36,7 +36,6 @@ pub const Value = struct { /// The only possible value for a particular type, which is stored externally. 
the_only_possible_value, - empty_struct_value, empty_array, // See last_no_payload_tag below. // After this, the tag requires a payload. @@ -124,7 +123,6 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { .the_only_possible_value, - .empty_struct_value, .empty_array, => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), @@ -269,7 +267,6 @@ pub const Value = struct { } else switch (self.legacy.ptr_otherwise.tag) { .the_only_possible_value, .empty_array, - .empty_struct_value, => unreachable, .ty, .lazy_align, .lazy_size => { @@ -488,7 +485,6 @@ pub const Value = struct { } var val = start_val; while (true) switch (val.tag()) { - .empty_struct_value => return out_stream.writeAll("struct {}{}"), .aggregate => { return out_stream.writeAll("(aggregate)"); }, @@ -1914,7 +1910,7 @@ pub const Value = struct { const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) switch (a_tag) { - .the_only_possible_value, .empty_struct_value => return true, + .the_only_possible_value => return true, .enum_literal => { const a_name = a.castTag(.enum_literal).?.data; const b_name = b.castTag(.enum_literal).?.data; @@ -2106,7 +2102,6 @@ pub const Value = struct { }, .Struct => { // A struct can be represented with one of: - // .empty_struct_value, // .the_one_possible_value, // .aggregate, // Note that we already checked above for matching tags, e.g. both .aggregate. @@ -2254,7 +2249,6 @@ pub const Value = struct { }, .Struct => { switch (val.tag()) { - .empty_struct_value => {}, .aggregate => { const field_values = val.castTag(.aggregate).?.data; for (field_values, 0..) |field_val, i| { @@ -2587,7 +2581,6 @@ pub const Value = struct { .none => switch (val.tag()) { // This is the case of accessing an element of an undef array. .empty_array => unreachable, // out of bounds array index - .empty_struct_value => unreachable, // out of bounds array index .empty_array_sentinel => { assert(index == 0); // The only valid index for an empty array with sentinel. 
@@ -2749,6 +2742,17 @@ pub const Value = struct { pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value { switch (val.ip_index) { .undef => return Value.undef, + .empty_struct => { + if (ty.isSimpleTupleOrAnonStruct()) { + const tuple = ty.tupleFields(); + return tuple.values[index]; + } + if (try ty.structFieldValueComptime(mod, index)) |some| { + return some; + } + unreachable; + }, + .none => switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -2762,17 +2766,6 @@ pub const Value = struct { .the_only_possible_value => return (try ty.onePossibleValue(mod)).?, - .empty_struct_value => { - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - return tuple.values[index]; - } - if (try ty.structFieldValueComptime(mod, index)) |some| { - return some; - } - unreachable; - }, - else => unreachable, }, else => unreachable, @@ -5189,6 +5182,7 @@ pub const Value = struct { pub const generic_poison: Value = .{ .ip_index = .generic_poison, .legacy = undefined }; pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined }; + pub const empty_struct: Value = .{ .ip_index = .empty_struct, .legacy = undefined }; pub fn makeBool(x: bool) Value { return if (x) Value.true else Value.false; From 8587e510e46f98e321fbad30bb235e5eed33f1ba Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 22:25:50 -0700 Subject: [PATCH 051/205] stage2: more InternPool related fixes * make Sema.zirPtrType coerce the sentinel value against the element type * fix lazyAbiAlignment wrong result type * typeHasOnePossibleValue no longer tries to create interned enum tag value with integer zero, instead uses enum_field_index * Type.ptr avoids trying to store typed null values into the intern pool --- src/Sema.zig | 18 +++++++++++------- src/type.zig | 16 ++++++++++++---- src/value.zig | 8 ++++++++ 3 files changed, 31 insertions(+), 11 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 0085b16ae1e6..36049c83763b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -15615,7 +15615,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
=> {}, } const val = try ty.lazyAbiSize(mod, sema.arena); - if (val.ip_index == .none and val.tag() == .lazy_size) { + if (val.isLazySize()) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -17674,6 +17674,10 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (ty.isGenericPoison()) return error.GenericPoison; break :blk ty; }; + + if (elem_ty.zigTypeTag(mod) == .NoReturn) + return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); + const target = sema.mod.getTarget(); var extra_i = extra.end; @@ -17681,7 +17685,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const sentinel = if (inst_data.flags.has_sentinel) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; - break :blk (try sema.resolveInstConst(block, sentinel_src, ref, "pointer sentinel value must be comptime-known")).val; + const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src); + const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known"); + break :blk val; } else null; const abi_align: u32 = if (inst_data.flags.has_align) blk: { @@ -17725,9 +17731,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{}); } - if (elem_ty.zigTypeTag(mod) == .NoReturn) { - return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag(mod) == .Fn) { + if (elem_ty.zigTypeTag(mod) == .Fn) { if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } @@ -18580,7 +18584,7 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } const val = try ty.lazyAbiAlignment(mod, sema.arena); - if (val.tag() == .lazy_align) { + if (val.isLazyAlign()) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -33056,7 +33060,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const enum_simple = resolved_ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return try mod.intValue(ty, 0), + 1 => return try Value.Tag.enum_field_index.create(sema.arena, 0), else => return null, } }, diff --git a/src/type.zig b/src/type.zig index d784a25eb3d9..b088f58f0a1e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2090,10 +2090,11 @@ pub const Type = struct { } /// May capture a reference to `ty`. + /// Returned value has type `comptime_int`. 
pub fn lazyAbiAlignment(ty: Type, mod: *Module, arena: Allocator) !Value { switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, - .scalar => |x| return mod.intValue(ty, x), + .scalar => |x| return mod.intValue(Type.comptime_int, x), } } @@ -5441,9 +5442,16 @@ pub const Type = struct { } } - if (d.pointee_type.ip_index != .none and - (d.sentinel == null or d.sentinel.?.ip_index != .none)) - { + ip: { + if (d.pointee_type.ip_index == .none) break :ip; + + if (d.sentinel) |s| { + switch (s.ip_index) { + .none, .null_value => break :ip, + else => {}, + } + } + return mod.ptrType(.{ .elem_type = d.pointee_type.ip_index, .sentinel = if (d.sentinel) |s| s.ip_index else .none, diff --git a/src/value.zig b/src/value.zig index 537267754358..855e7697670e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2637,6 +2637,14 @@ pub const Value = struct { } } + pub fn isLazyAlign(val: Value) bool { + return val.ip_index == .none and val.tag() == .lazy_align; + } + + pub fn isLazySize(val: Value) bool { + return val.ip_index == .none and val.tag() == .lazy_size; + } + pub fn isRuntimeValue(val: Value) bool { return val.ip_index == .none and val.tag() == .runtime_value; } From ad06b249b6e4253f553e7255dd5530172a14a70f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 22:36:57 -0700 Subject: [PATCH 052/205] Sema: introduce Value.enum_field_0 and use it to fix typeHasOnePossibleValue logic in two different places. --- src/Sema.zig | 8 ++++---- src/type.zig | 6 +++--- src/value.zig | 10 ++++++++++ 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 36049c83763b..204fb798856d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33031,7 +33031,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } if (enum_obj.fields.count() == 1) { if (enum_obj.values.count() == 0) { - return try mod.intValue(ty, 0); // auto-numbered + return Value.enum_field_0; // auto-numbered } else { return enum_obj.values.keys()[0]; } @@ -33048,7 +33048,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { switch (enum_obj.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_obj.values.count() == 0) { - return try mod.intValue(ty, 0); // auto-numbered + return Value.enum_field_0; // auto-numbered } else { return enum_obj.values.keys()[0]; }, @@ -33060,14 +33060,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const enum_simple = resolved_ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return try Value.Tag.enum_field_index.create(sema.arena, 0), + 1 => return Value.enum_field_0, else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return try mod.intValue(ty, 0); + return Value.enum_field_0; } else { return null; } diff --git a/src/type.zig b/src/type.zig index b088f58f0a1e..4ef5da57ea86 100644 --- a/src/type.zig +++ b/src/type.zig @@ -4027,7 +4027,7 @@ pub const Type = struct { switch (enum_full.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_full.values.count() == 0) { - return try mod.intValue(ty, 0); // auto-numbered + return Value.enum_field_0; // auto-numbered } else { return enum_full.values.keys()[0]; }, @@ -4038,14 +4038,14 @@ pub const Type = struct { const enum_simple = ty.castTag(.enum_simple).?.data; switch 
(enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return try mod.intValue(ty, 0), + 1 => return Value.enum_field_0, else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (!tag_ty.hasRuntimeBits(mod)) { - return try mod.intValue(ty, 0); + return Value.enum_field_0; } else { return null; } diff --git a/src/value.zig b/src/value.zig index 855e7697670e..e381f4cd1792 100644 --- a/src/value.zig +++ b/src/value.zig @@ -5192,6 +5192,16 @@ pub const Value = struct { pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined }; pub const empty_struct: Value = .{ .ip_index = .empty_struct, .legacy = undefined }; + pub const enum_field_0: Value = .{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &enum_field_0_payload.base }, + }; + + var enum_field_0_payload: Payload.U32 = .{ + .base = .{ .tag = .enum_field_index }, + .data = 0, + }; + pub fn makeBool(x: bool) Value { return if (x) Value.true else Value.false; } From fd674d95bee4815783bb282c80ba6af369296706 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 22:46:59 -0700 Subject: [PATCH 053/205] InternPool: add indexToKey for empty struct types --- src/InternPool.zig | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 4f2e792a4968..a49b98bd50de 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -59,10 +59,8 @@ pub const Key = union(enum) { ty: Index, tag: BigIntConst, }, - struct_type: struct { - fields_len: u32, - // TODO move Module.Struct data to InternPool - }, + struct_type: StructType, + union_type: struct { fields_len: u32, // TODO move Module.Union data to InternPool @@ -111,6 +109,11 @@ pub const Key = union(enum) { child: Index, }; + pub const StructType = struct { + fields_len: u32, + // TODO move Module.Struct data to InternPool + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -1058,7 +1061,11 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), - .simple_internal => @panic("TODO"), + .simple_internal => switch (@intToEnum(SimpleInternal, data)) { + .type_empty_struct => .{ .struct_type = .{ + .fields_len = 0, + } }, + }, .int_u8 => .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = data }, From 68b95a39b1fe734b938ec02fa2b16bbb63170f87 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 May 2023 11:51:32 -0700 Subject: [PATCH 054/205] InternPool: add ptr-to-int value Also modify coercion in Sema to be InternPool-aware by calling getCoerced. 
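For example, a successful in-memory coercion of an already-interned value now
takes roughly this shape (lifted from the Sema diff below):

    const new_val = try mod.intern_pool.getCoerced(mod.gpa, val.ip_index, dest_ty.ip_index);
    return sema.addConstant(dest_ty, new_val.toValue());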
The unnecessary comptime logic in mod.intValue is also deleted.
---
 src/InternPool.zig | 180 +++++++++++++++++++++++++++++++++------------
 src/Module.zig     |  18 +++--
 src/Sema.zig       |  39 +++++++---
 src/type.zig       |  46 +++++++---
 4 files changed, 212 insertions(+), 71 deletions(-)

diff --git a/src/InternPool.zig b/src/InternPool.zig
index a49b98bd50de..27d0fb944586 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -55,6 +55,7 @@ pub const Key = union(enum) {
         lib_name: u32,
     },
     int: Key.Int,
+    ptr: Key.Ptr,
     enum_tag: struct {
         ty: Index,
         tag: BigIntConst,
     },
@@ -140,6 +141,16 @@ pub const Key = union(enum) {
         };
     };
 
+    pub const Ptr = struct {
+        ty: Index,
+        addr: Addr,
+
+        pub const Addr = union(enum) {
+            decl: DeclIndex,
+            int: Index,
+        };
+    };
+
     pub fn hash32(key: Key) u32 {
         return @truncate(u32, key.hash64());
     }
@@ -176,6 +187,16 @@ pub const Key = union(enum) {
                 for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb);
             },
 
+            .ptr => |ptr| {
+                std.hash.autoHash(hasher, ptr.ty);
+                // Int-to-ptr pointers are hashed separately from decl-referencing pointers.
+                // This is sound due to pointer provenance rules.
+                switch (ptr.addr) {
+                    .int => |int| std.hash.autoHash(hasher, int),
+                    .decl => @panic("TODO"),
+                }
+            },
+
             .enum_tag => |enum_tag| {
                 std.hash.autoHash(hasher, enum_tag.ty);
                 std.hash.autoHash(hasher, enum_tag.tag.positive);
@@ -237,8 +258,30 @@ pub const Key = union(enum) {
                 return std.meta.eql(a_info, b_info);
             },
 
+            .ptr => |a_info| {
+                const b_info = b.ptr;
+
+                if (a_info.ty != b_info.ty)
+                    return false;
+
+                return switch (a_info.addr) {
+                    .int => |a_int| switch (b_info.addr) {
+                        .int => |b_int| a_int == b_int,
+                        .decl => false,
+                    },
+                    .decl => |a_decl| switch (b_info.addr) {
+                        .int => false,
+                        .decl => |b_decl| a_decl == b_decl,
+                    },
+                };
+            },
+
             .int => |a_info| {
                 const b_info = b.int;
+
+                if (a_info.ty != b_info.ty)
+                    return false;
+
                 return switch (a_info.storage) {
                     .u64 => |aa| switch (b_info.storage) {
                         .u64 => |bb| aa == bb,
@@ -298,9 +341,11 @@ pub const Key = union(enum) {
             .union_type,
             => return .type_type,
 
-            .int => |x| return x.ty,
-            .extern_func => |x| return x.ty,
-            .enum_tag => |x| return x.ty,
+            inline .ptr,
+            .int,
+            .extern_func,
+            .enum_tag,
+            => |x| return x.ty,
 
             .simple_value => |s| switch (s) {
                 .undefined => return .undefined_type,
@@ -724,6 +769,9 @@ pub const Tag = enum(u8) {
     /// only an enum tag, but will be presented via the API with a different Key.
     /// data is SimpleInternal enum value.
     simple_internal,
+    /// A pointer with an integer address.
+    /// data is extra index of PtrInt, which contains the type and address.
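+    /// Produced e.g. by `@intToPtr` and by comptime pointer arithmetic.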
+ ptr_int, /// Type: u8 /// data is integer value int_u8, @@ -897,16 +945,13 @@ pub const Array = struct { child: Index, sentinel: Index, - pub const Length = packed struct(u64) { - len0: u32, - len1: u32, - }; + pub const Length = PackedU64; pub fn getLength(a: Array) u64 { - return @bitCast(u64, Length{ - .len0 = a.len0, - .len1 = a.len1, - }); + return (PackedU64{ + .a = a.len0, + .b = a.len1, + }).get(); } }; @@ -929,6 +974,24 @@ pub const EnumSimple = struct { fields_len: u32, }; +pub const PackedU64 = packed struct(u64) { + a: u32, + b: u32, + + pub fn get(x: PackedU64) u64 { + return @bitCast(u64, x); + } + + pub fn init(x: u64) PackedU64 { + return @bitCast(PackedU64, x); + } +}; + +pub const PtrInt = struct { + ty: Index, + addr: Index, +}; + /// Trailing: Limb for every limbs_len pub const Int = struct { ty: Index, @@ -1066,6 +1129,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .fields_len = 0, } }, }, + .ptr_int => { + const info = ip.extraData(PtrInt, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .int = info.addr }, + } }; + }, .int_u8 => .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = data }, @@ -1188,12 +1258,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } } - const length = @bitCast(Array.Length, array_type.len); + const length = Array.Length.init(array_type.len); ip.items.appendAssumeCapacity(.{ .tag = .type_array_big, .data = try ip.addExtra(gpa, Array{ - .len0 = length.len0, - .len1 = length.len1, + .len0 = length.a, + .len1 = length.b, .child = array_type.child, .sentinel = array_type.sentinel, }), @@ -1237,6 +1307,20 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .extern_func => @panic("TODO"), + .ptr => |ptr| switch (ptr.addr) { + .decl => @panic("TODO"), + .int => |int| { + assert(ptr.ty != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_int, + .data = try ip.addExtra(gpa, PtrInt{ + .ty = ptr.ty, + .addr = int, + }), + }); + }, + }, + .int => |int| b: { switch (int.ty) { .none => unreachable, @@ -1620,38 +1704,43 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index { } } -/// Given an existing integer value, returns the same numerical value but with -/// the supplied type. -pub fn getCoercedInt(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { - const key = ip.indexToKey(val); - // The key cannot be passed directly to `get`, otherwise in the case of - // big_int storage, the limbs would be invalidated before they are read. - // Here we pre-reserve the limbs to ensure that the logic in `addInt` will - // not use an invalidated limbs pointer. - switch (key.int.storage) { - .u64 => |x| return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .u64 = x }, - } }), - .i64 => |x| return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .i64 = x }, - } }), - - .big_int => |big_int| { - const positive = big_int.positive; - const limbs = ip.limbsSliceToIndex(big_int.limbs); - // This line invalidates the limbs slice, but the indexes computed in the - // previous line are still correct. - try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); - return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .big_int = .{ - .limbs = ip.limbsIndexToSlice(limbs), - .positive = positive, - } }, - } }); +/// Given an existing value, returns the same value but with the supplied type. 
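+/// May intern new values, hence the allocator parameter and possible failure.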
+/// Only some combinations are allowed: +/// * int to int +pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { + switch (ip.indexToKey(val)) { + .int => |int| { + // The key cannot be passed directly to `get`, otherwise in the case of + // big_int storage, the limbs would be invalidated before they are read. + // Here we pre-reserve the limbs to ensure that the logic in `addInt` will + // not use an invalidated limbs pointer. + switch (int.storage) { + .u64 => |x| return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .u64 = x }, + } }), + .i64 => |x| return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .i64 = x }, + } }), + + .big_int => |big_int| { + const positive = big_int.positive; + const limbs = ip.limbsSliceToIndex(big_int.limbs); + // This line invalidates the limbs slice, but the indexes computed in the + // previous line are still correct. + try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); + return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .big_int = .{ + .limbs = ip.limbsIndexToSlice(limbs), + .positive = positive, + } }, + } }); + }, + } }, + else => unreachable, } } @@ -1708,6 +1797,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .simple_type => 0, .simple_value => 0, .simple_internal => 0, + .ptr_int => @sizeOf(PtrInt), .int_u8 => 0, .int_u16 => 0, .int_u32 => 0, diff --git a/src/Module.zig b/src/Module.zig index d06d22402a94..a5e61fa4f9ed 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6887,17 +6887,23 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } +pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + assert(ty.zigTypeTag(mod) == .Pointer); + const i = try intern(mod, .{ .ptr = .{ + .ty = ty.ip_index, + .addr = .{ .int = try intern(mod, .{ .int = .{ + .ty = ty.ip_index, + .storage = .{ .u64 = x }, + } }) }, + } }); + return i.toValue(); +} + pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { if (std.debug.runtime_safety) { - // TODO: decide if this also works for ABI int types like enums const tag = ty.zigTypeTag(mod); assert(tag == .Int or tag == .ComptimeInt); } - if (@TypeOf(x) == comptime_int) { - if (comptime std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); - if (comptime std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); - @compileError("Out-of-range comptime_int passed to Module.intValue"); - } if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); var limbs_buffer: [4]usize = undefined; diff --git a/src/Sema.zig b/src/Sema.zig index 204fb798856d..58c87db371d2 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -15096,7 +15096,7 @@ fn analyzePtrArithmetic( .ptr_sub => addr - elem_size * offset_int, else => unreachable, }; - const new_ptr_val = try mod.intValue(new_ptr_ty, new_addr); + const new_ptr_val = try mod.ptrIntValue(new_ptr_ty, new_addr); return sema.addConstant(new_ptr_ty, new_ptr_val); } if (air_tag == .ptr_sub) { @@ -19931,7 +19931,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0) return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)}); - return sema.addConstant(ptr_ty, try 
mod.intValue(ptr_ty, addr)); + return sema.addConstant(ptr_ty, try mod.ptrIntValue(ptr_ty, addr)); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -25640,8 +25640,13 @@ fn coerceExtra( var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (maybe_inst_val) |val| { - // Keep the comptime Value representation; take the new type. - return sema.addConstant(dest_ty, val); + if (val.ip_index == .none or val.ip_index == .null_value) { + // Keep the comptime Value representation; take the new type. + return sema.addConstant(dest_ty, val); + } else { + const new_val = try mod.intern_pool.getCoerced(mod.gpa, val.ip_index, dest_ty.ip_index); + return sema.addConstant(dest_ty, new_val.toValue()); + } } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); @@ -26014,7 +26019,7 @@ fn coerceExtra( if (!opts.report_err) return error.NotCoercible; return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }); } - const new_val = try mod.intern_pool.getCoercedInt(sema.gpa, val.ip_index, dest_ty.ip_index); + const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index); return try sema.addConstant(dest_ty, new_val.toValue()); } if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { @@ -31673,10 +31678,13 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }, }; } @@ -33193,10 +33201,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }, } } @@ -33253,7 +33264,14 @@ pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); // This assertion can be removed when the `ty` parameter is removed from // this function thanks to the InternPool transition being complete. 
- assert(Type.eql(sema.typeOf(result), ty, sema.mod)); + if (std.debug.runtime_safety) { + const val_ty = sema.typeOf(result); + if (!Type.eql(val_ty, ty, sema.mod)) { + std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ + ty.fmt(sema.mod), val_ty.fmt(sema.mod), + }); + } + } return result; } const ty_inst = try sema.addType(ty); @@ -33752,10 +33770,13 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }, }; } diff --git a/src/type.zig b/src/type.zig index 4ef5da57ea86..8fc4c20c4513 100644 --- a/src/type.zig +++ b/src/type.zig @@ -142,11 +142,12 @@ pub const Type = struct { .var_args_param => unreachable, }, - .extern_func, - .int, - .enum_tag, - .simple_value, - => unreachable, // it's a value, not a type + // values, not types + .extern_func => unreachable, + .int => unreachable, + .ptr => unreachable, + .enum_tag => unreachable, + .simple_value => unreachable, }, } } @@ -1576,6 +1577,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .ptr => unreachable, .enum_tag => unreachable, }, } @@ -1842,10 +1844,13 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }, } } @@ -1950,10 +1955,13 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }, }; } @@ -2348,10 +2356,13 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }, } } @@ -2759,10 +2770,13 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }, } } @@ -2926,10 +2940,13 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }; const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; @@ -3780,9 +3797,12 @@ pub const Type = struct { .simple_type => unreachable, // handled via Index enum tag above .struct_type => @panic("TODO"), .union_type => unreachable, + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .ptr => unreachable, .enum_tag => unreachable, }, }; @@ -4152,10 +4172,13 @@ pub const Type = struct { }, .struct_type => 
@panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .ptr => unreachable, + .enum_tag => unreachable, }, }; } @@ -4319,6 +4342,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .ptr => unreachable, .enum_tag => unreachable, // it's a value, not a type }, }; From e94a81c951905a6b5bcf2a6028589ac1e33d1edd Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 May 2023 13:00:21 -0700 Subject: [PATCH 055/205] InternPool: add optional values --- src/InternPool.zig | 118 ++++++++++++++++++++++++++++----------------- src/Module.zig | 22 ++++++++- src/Sema.zig | 3 ++ src/type.zig | 16 +++++- src/value.zig | 8 +++ 5 files changed, 122 insertions(+), 45 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 27d0fb944586..69037c389972 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -55,7 +55,8 @@ pub const Key = union(enum) { lib_name: u32, }, int: Key.Int, - ptr: Key.Ptr, + ptr: Ptr, + opt: Opt, enum_tag: struct { ty: Index, tag: BigIntConst, @@ -151,6 +152,13 @@ pub const Key = union(enum) { }; }; + /// `null` is represented by the `val` field being `none`. + pub const Opt = struct { + ty: Index, + /// This could be `none`, indicating the optional is `null`. + val: Index, + }; + pub fn hash32(key: Key) u32 { return @truncate(u32, key.hash64()); } @@ -175,6 +183,7 @@ pub const Key = union(enum) { .simple_type, .simple_value, .extern_func, + .opt, => |info| std.hash.autoHash(hasher, info), .int => |int| { @@ -257,6 +266,10 @@ pub const Key = union(enum) { const b_info = b.extern_func; return std.meta.eql(a_info, b_info); }, + .opt => |a_info| { + const b_info = b.opt; + return std.meta.eql(a_info, b_info); + }, .ptr => |a_info| { const b_info = b.ptr; @@ -343,6 +356,7 @@ pub const Key = union(enum) { inline .ptr, .int, + .opt, .extern_func, .enum_tag, => |x| return x.ty, @@ -771,7 +785,15 @@ pub const Tag = enum(u8) { simple_internal, /// A pointer to an integer value. /// data is extra index of PtrInt, which contains the type and address. + /// Only pointer types are allowed to have this encoding. Optional types must use + /// `opt_payload` or `opt_null`. ptr_int, + /// An optional value that is non-null. + /// data is Index of the payload value. + opt_payload, + /// An optional value that is null. + /// data is Index of the payload type. 
+ opt_null, /// Type: u8 /// data is integer value int_u8, @@ -1129,6 +1151,14 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .fields_len = 0, } }, }, + .opt_null => .{ .opt = .{ + .ty = @intToEnum(Index, data), + .val = .none, + } }, + .opt_payload => .{ .opt = .{ + .ty = indexToKey(ip, @intToEnum(Index, data)).typeOf(), + .val = @intToEnum(Index, data), + } }, .ptr_int => { const info = ip.extraData(PtrInt, data); return .{ .ptr = .{ @@ -1321,6 +1351,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, }, + .opt => |opt| { + assert(opt.ty != .none); + ip.items.appendAssumeCapacity(if (opt.val == .none) .{ + .tag = .opt_null, + .data = @enumToInt(opt.ty), + } else .{ + .tag = .opt_payload, + .data = @enumToInt(opt.val), + }); + }, + .int => |int| b: { switch (int.ty) { .none => unreachable, @@ -1342,62 +1383,51 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .u16_type => switch (int.storage) { .big_int => |big_int| { - if (big_int.to(u32)) |casted| { - ip.items.appendAssumeCapacity(.{ - .tag = .int_u16, - .data = casted, - }); - break :b; - } else |_| {} + ip.items.appendAssumeCapacity(.{ + .tag = .int_u16, + .data = big_int.to(u16) catch unreachable, + }); + break :b; }, inline .u64, .i64 => |x| { - if (std.math.cast(u32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ - .tag = .int_u16, - .data = casted, - }); - break :b; - } + ip.items.appendAssumeCapacity(.{ + .tag = .int_u16, + .data = @intCast(u16, x), + }); + break :b; }, }, .u32_type => switch (int.storage) { .big_int => |big_int| { - if (big_int.to(u32)) |casted| { - ip.items.appendAssumeCapacity(.{ - .tag = .int_u32, - .data = casted, - }); - break :b; - } else |_| {} + ip.items.appendAssumeCapacity(.{ + .tag = .int_u32, + .data = big_int.to(u32) catch unreachable, + }); + break :b; }, inline .u64, .i64 => |x| { - if (std.math.cast(u32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ - .tag = .int_u32, - .data = casted, - }); - break :b; - } + ip.items.appendAssumeCapacity(.{ + .tag = .int_u32, + .data = @intCast(u32, x), + }); + break :b; }, }, .i32_type => switch (int.storage) { .big_int => |big_int| { - if (big_int.to(i32)) |casted| { - ip.items.appendAssumeCapacity(.{ - .tag = .int_i32, - .data = @bitCast(u32, casted), - }); - break :b; - } else |_| {} + const casted = big_int.to(i32) catch unreachable; + ip.items.appendAssumeCapacity(.{ + .tag = .int_i32, + .data = @bitCast(u32, casted), + }); + break :b; }, inline .u64, .i64 => |x| { - if (std.math.cast(i32, x)) |casted| { - ip.items.appendAssumeCapacity(.{ - .tag = .int_i32, - .data = @bitCast(u32, casted), - }); - break :b; - } + ip.items.appendAssumeCapacity(.{ + .tag = .int_i32, + .data = @bitCast(u32, @intCast(i32, x)), + }); + break :b; }, }, .usize_type => switch (int.storage) { @@ -1798,6 +1828,8 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .simple_value => 0, .simple_internal => 0, .ptr_int => @sizeOf(PtrInt), + .opt_null => 0, + .opt_payload => 0, .int_u8 => 0, .int_u16 => 0, .int_u32 => 0, diff --git a/src/Module.zig b/src/Module.zig index a5e61fa4f9ed..dc7e34adc360 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6887,12 +6887,32 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } +/// Supports optionals in addition to pointers. 
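+/// For a pointer-like optional, the address is wrapped in a non-null `opt` value.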
pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + if (ty.isPtrLikeOptional(mod)) { + const i = try intern(mod, .{ .opt = .{ + .ty = ty.ip_index, + .val = try intern(mod, .{ .ptr = .{ + .ty = ty.childType(mod).ip_index, + .addr = .{ .int = try intern(mod, .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = x }, + } }) }, + } }), + } }); + return i.toValue(); + } else { + return ptrIntValue_ptronly(mod, ty, x); + } +} + +/// Supports only pointers. See `ptrIntValue` for pointer-like optional support. +pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { assert(ty.zigTypeTag(mod) == .Pointer); const i = try intern(mod, .{ .ptr = .{ .ty = ty.ip_index, .addr = .{ .int = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = .usize_type, .storage = .{ .u64 = x }, } }) }, } }); diff --git a/src/Sema.zig b/src/Sema.zig index 58c87db371d2..3d673246732e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -31684,6 +31684,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, }; @@ -33207,6 +33208,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, } @@ -33776,6 +33778,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, }; diff --git a/src/type.zig b/src/type.zig index 8fc4c20c4513..e7dad914221a 100644 --- a/src/type.zig +++ b/src/type.zig @@ -146,6 +146,7 @@ pub const Type = struct { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, .simple_value => unreachable, }, @@ -1574,10 +1575,13 @@ pub const Type = struct { .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, } @@ -1850,6 +1854,7 @@ pub const Type = struct { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, } @@ -1961,6 +1966,7 @@ pub const Type = struct { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, }; @@ -2362,6 +2368,7 @@ pub const Type = struct { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, } @@ -2776,6 +2783,7 @@ pub const Type = struct { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, } @@ -2946,6 +2954,7 @@ pub const Type = struct { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }; @@ -3803,6 +3812,7 @@ pub const Type = struct { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, }; @@ -4178,6 +4188,7 @@ pub const Type = struct { .extern_func => unreachable, .int => unreachable, .ptr => unreachable, + .opt => unreachable, .enum_tag => unreachable, }, }; @@ -4339,11 +4350,14 @@ pub const 
Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + + // values, not types .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, .ptr => unreachable, - .enum_tag => unreachable, // it's a value, not a type + .opt => unreachable, + .enum_tag => unreachable, }, }; } diff --git a/src/value.zig b/src/value.zig index e381f4cd1792..c95f218dbe44 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2336,6 +2336,14 @@ pub const Value = struct { // The value is runtime-known and shouldn't affect the hash. if (val.isRuntimeValue()) return; + if (val.ip_index != .none) { + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. + std.hash.autoHash(hasher, val.ip_index); + return; + } + switch (ty.zigTypeTag(mod)) { .Opaque => unreachable, // Cannot hash opaque types .Void, From 275652f620541919087bc92da0d2f9e97c66d3c0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 May 2023 16:52:59 -0700 Subject: [PATCH 056/205] stage2: move opaque types to InternPool --- src/Compilation.zig | 24 +- src/InternPool.zig | 73 ++-- src/Module.zig | 395 +++++++++++++--------- src/Sema.zig | 637 ++++++++++++++++++----------------- src/arch/wasm/CodeGen.zig | 5 +- src/arch/wasm/Emit.zig | 2 +- src/arch/x86_64/CodeGen.zig | 15 +- src/codegen/c.zig | 6 +- src/codegen/c/type.zig | 8 +- src/codegen/llvm.zig | 66 ++-- src/codegen/spirv.zig | 11 +- src/codegen/spirv/Module.zig | 4 +- src/crash_report.zig | 8 +- src/link.zig | 4 +- src/link/Coff.zig | 67 ++-- src/link/Dwarf.zig | 2 +- src/link/Elf.zig | 65 ++-- src/link/MachO.zig | 84 ++--- src/link/Plan9.zig | 32 +- src/link/Wasm.zig | 14 +- src/type.zig | 221 ++++++------ 21 files changed, 935 insertions(+), 808 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 75af9362f69e..6291ce78d469 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2048,7 +2048,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void assert(decl.deletion_flag); assert(decl.dependants.count() == 0); const is_anon = if (decl.zir_decl_index == 0) blk: { - break :blk decl.src_namespace.anon_decls.swapRemove(decl_index); + break :blk module.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index); } else false; try module.clearDecl(decl_index, null); @@ -2530,8 +2530,7 @@ pub fn totalErrorCount(self: *Compilation) u32 { // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. for (module.failed_decls.keys()) |key| { - const decl = module.declPtr(key); - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(key).okToReportErrors()) { total += 1; if (module.cimport_errors.get(key)) |errors| { total += errors.len; @@ -2540,8 +2539,7 @@ pub fn totalErrorCount(self: *Compilation) u32 { } if (module.emit_h) |emit_h| { for (emit_h.failed_decls.keys()) |key| { - const decl = module.declPtr(key); - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(key).okToReportErrors()) { total += 1; } } @@ -2644,10 +2642,10 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { { var it = module.failed_decls.iterator(); while (it.next()) |entry| { - const decl = module.declPtr(entry.key_ptr.*); + const decl_index = entry.key_ptr.*; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. 
- if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(decl_index).okToReportErrors()) { try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); if (module.cimport_errors.get(entry.key_ptr.*)) |cimport_errors| for (cimport_errors) |c_error| { try bundle.addRootErrorMessage(.{ @@ -2669,10 +2667,10 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { if (module.emit_h) |emit_h| { var it = emit_h.failed_decls.iterator(); while (it.next()) |entry| { - const decl = module.declPtr(entry.key_ptr.*); + const decl_index = entry.key_ptr.*; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(decl_index).okToReportErrors()) { try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); } } @@ -2710,7 +2708,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { const values = module.compile_log_decls.values(); // First one will be the error; subsequent ones will be notes. const err_decl = module.declPtr(keys[0]); - const src_loc = err_decl.nodeOffsetSrcLoc(values[0]); + const src_loc = err_decl.nodeOffsetSrcLoc(values[0], module); const err_msg = Module.ErrorMsg{ .src_loc = src_loc, .msg = "found compile log statement", @@ -2721,7 +2719,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { for (keys[1..], 0..) |key, i| { const note_decl = module.declPtr(key); err_msg.notes[i] = .{ - .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]), + .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1], module), .msg = "also here", }; } @@ -3235,7 +3233,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v try module.failed_decls.ensureUnusedCapacity(gpa, 1); module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(module), "unable to update line number: {s}", .{@errorName(err)}, )); @@ -3848,7 +3846,7 @@ fn reportRetryableEmbedFileError( const mod = comp.bin_file.options.module.?; const gpa = mod.gpa; - const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(); + const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(mod); const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path| try Module.ErrorMsg.create( diff --git a/src/InternPool.zig b/src/InternPool.zig index 69037c389972..3708e21ef6c2 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -17,7 +17,8 @@ const BigIntMutable = std.math.big.int.Mutable; const Limb = std.math.big.Limb; const InternPool = @This(); -const DeclIndex = enum(u32) { _ }; +const DeclIndex = @import("Module.zig").Decl.Index; +const NamespaceIndex = @import("Module.zig").Namespace.Index; const KeyAdapter = struct { intern_pool: *const InternPool, @@ -48,7 +49,7 @@ pub const Key = union(enum) { extern_func: struct { ty: Index, /// The Decl that corresponds to the function itself. - owner_decl: DeclIndex, + decl: DeclIndex, /// Library name if specified. /// For example `extern "c" fn write(...) usize` would have 'c' as library name. /// Index into the string table bytes. @@ -62,6 +63,7 @@ pub const Key = union(enum) { tag: BigIntConst, }, struct_type: StructType, + opaque_type: OpaqueType, union_type: struct { fields_len: u32, @@ -116,6 +118,13 @@ pub const Key = union(enum) { // TODO move Module.Struct data to InternPool }; + pub const OpaqueType = struct { + /// The Decl that corresponds to the opaque itself. 
+ decl: DeclIndex, + /// Represents the declarations inside this opaque. + namespace: NamespaceIndex, + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -221,6 +230,7 @@ pub const Key = union(enum) { _ = union_type; @panic("TODO"); }, + .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), } } @@ -338,6 +348,11 @@ pub const Key = union(enum) { _ = b_info; @panic("TODO"); }, + + .opaque_type => |a_info| { + const b_info = b.opaque_type; + return a_info.decl == b_info.decl; + }, } } @@ -352,6 +367,7 @@ pub const Key = union(enum) { .simple_type, .struct_type, .union_type, + .opaque_type, => return .type_type, inline .ptr, @@ -770,10 +786,13 @@ pub const Tag = enum(u8) { /// are auto-numbered, and there are no declarations. /// data is payload index to `EnumSimple`. type_enum_simple, - /// A type that can be represented with only an enum tag. /// data is SimpleType enum value. simple_type, + /// An opaque type. + /// data is index of Key.OpaqueType in extra. + type_opaque, + /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, @@ -986,7 +1005,7 @@ pub const ErrorUnion = struct { /// 0. field name: null-terminated string index for each fields_len; declaration order pub const EnumSimple = struct { /// The Decl that corresponds to the enum itself. - owner_decl: DeclIndex, + decl: DeclIndex, /// An integer type which is used for the numerical value of the enum. This /// is inferred by Zig to be the smallest power of two unsigned int that /// fits the number of fields. It is stored here to avoid unnecessary @@ -1146,6 +1165,9 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), + + .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, + .simple_internal => switch (@intToEnum(SimpleInternal, data)) { .type_empty_struct => .{ .struct_type = .{ .fields_len = 0, @@ -1335,6 +1357,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(simple_value), }); }, + + .struct_type => |struct_type| { + if (struct_type.fields_len != 0) { + @panic("TODO"); // handle structs other than empty_struct + } + ip.items.appendAssumeCapacity(.{ + .tag = .simple_internal, + .data = @enumToInt(SimpleInternal.type_empty_struct), + }); + }, + + .union_type => |union_type| { + _ = union_type; + @panic("TODO"); + }, + + .opaque_type => |opaque_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_opaque, + .data = try ip.addExtra(gpa, opaque_type), + }); + }, + .extern_func => @panic("TODO"), .ptr => |ptr| switch (ptr.addr) { @@ -1504,21 +1549,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative; try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs); }, - - .struct_type => |struct_type| { - if (struct_type.fields_len != 0) { - @panic("TODO"); // handle structs other than empty_struct - } - ip.items.appendAssumeCapacity(.{ - .tag = .simple_internal, - .data = @enumToInt(SimpleInternal.type_empty_struct), - }); - }, - - .union_type => |union_type| { - _ = union_type; - @panic("TODO"); - }, } return @intToEnum(Index, ip.items.len - 1); } @@ -1548,6 +1578,8 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Index => @enumToInt(@field(extra, field.name)), + DeclIndex => 
@enumToInt(@field(extra, field.name)), + NamespaceIndex => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), @@ -1603,6 +1635,8 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T { @field(result, field.name) = switch (field.type) { u32 => int32, Index => @intToEnum(Index, int32), + DeclIndex => @intToEnum(DeclIndex, int32), + NamespaceIndex => @intToEnum(NamespaceIndex, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), @@ -1824,6 +1858,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_optional => 0, .type_error_union => @sizeOf(ErrorUnion), .type_enum_simple => @sizeOf(EnumSimple), + .type_opaque => @sizeOf(Key.OpaqueType), .simple_type => 0, .simple_value => 0, .simple_internal => 0, diff --git a/src/Module.zig b/src/Module.zig index dc7e34adc360..7521d4d43928 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -185,6 +185,11 @@ allocated_decls: std.SegmentedList(Decl, 0) = .{}, /// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. decls_free_list: ArrayListUnmanaged(Decl.Index) = .{}, +/// Same pattern as with `allocated_decls`. +allocated_namespaces: std.SegmentedList(Namespace, 0) = .{}, +/// Same pattern as with `decls_free_list`. +namespaces_free_list: ArrayListUnmanaged(Namespace.Index) = .{}, + global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{}, reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { @@ -363,7 +368,7 @@ pub const Export = struct { pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc { const src_decl = mod.declPtr(exp.src_decl); return .{ - .file_scope = src_decl.getFileScope(), + .file_scope = src_decl.getFileScope(mod), .parent_decl_node = src_decl.src_node, .lazy = exp.src, }; @@ -494,7 +499,7 @@ pub const Decl = struct { /// Reference to externally owned memory. /// In the case of the Decl corresponding to a file, this is /// the namespace of the struct, since there is no parent. - src_namespace: *Namespace, + src_namespace: Namespace.Index, /// The scope which lexically contains this decl. A decl must depend /// on its lexical parent, in order to ensure that this pointer is valid. @@ -691,8 +696,8 @@ pub const Decl = struct { /// This name is relative to the containing namespace of the decl. /// The memory is owned by the containing File ZIR. 
- pub fn getName(decl: Decl) ?[:0]const u8 { - const zir = decl.getFileScope().zir; + pub fn getName(decl: Decl, mod: *Module) ?[:0]const u8 { + const zir = decl.getFileScope(mod).zir; return decl.getNameZir(zir); } @@ -703,8 +708,8 @@ pub const Decl = struct { return zir.nullTerminatedString(name_index); } - pub fn contentsHash(decl: Decl) std.zig.SrcHash { - const zir = decl.getFileScope().zir; + pub fn contentsHash(decl: Decl, mod: *Module) std.zig.SrcHash { + const zir = decl.getFileScope(mod).zir; return decl.contentsHashZir(zir); } @@ -715,31 +720,31 @@ pub const Decl = struct { return contents_hash; } - pub fn zirBlockIndex(decl: *const Decl) Zir.Inst.Index { + pub fn zirBlockIndex(decl: *const Decl, mod: *Module) Zir.Inst.Index { assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; return zir.extra[decl.zir_decl_index + 6]; } - pub fn zirAlignRef(decl: Decl) Zir.Inst.Ref { + pub fn zirAlignRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_align) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 8]); } - pub fn zirLinksectionRef(decl: Decl) Zir.Inst.Ref { + pub fn zirLinksectionRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_linksection_or_addrspace) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align); return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); } - pub fn zirAddrspaceRef(decl: Decl) Zir.Inst.Ref { + pub fn zirAddrspaceRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_linksection_or_addrspace) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align) + 1; return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); } @@ -764,25 +769,25 @@ pub const Decl = struct { return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(node_index)); } - pub fn srcLoc(decl: Decl) SrcLoc { - return decl.nodeOffsetSrcLoc(0); + pub fn srcLoc(decl: Decl, mod: *Module) SrcLoc { + return decl.nodeOffsetSrcLoc(0, mod); } - pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32) SrcLoc { + pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32, mod: *Module) SrcLoc { return .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = LazySrcLoc.nodeOffset(node_offset), }; } - pub fn srcToken(decl: Decl) Ast.TokenIndex { - const tree = &decl.getFileScope().tree; + pub fn srcToken(decl: Decl, mod: *Module) Ast.TokenIndex { + const tree = &decl.getFileScope(mod).tree; return tree.firstToken(decl.src_node); } - pub fn srcByteOffset(decl: Decl) u32 { - const tree = &decl.getFileScope().tree; + pub fn srcByteOffset(decl: Decl, mod: *Module) u32 { + const tree = &decl.getFileScope(mod).tree; return tree.tokens.items(.start)[decl.srcToken()]; } @@ -791,12 +796,12 @@ pub const Decl = struct { if (decl.name_fully_qualified) { return writer.writeAll(unqualified_name); } - return decl.src_namespace.renderFullyQualifiedName(mod, unqualified_name, writer); + return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedName(mod, unqualified_name, writer); } pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) 
!void { const unqualified_name = mem.sliceTo(decl.name, 0); - return decl.src_namespace.renderFullyQualifiedDebugName(mod, unqualified_name, writer); + return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, unqualified_name, writer); } pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 { @@ -877,32 +882,39 @@ pub const Decl = struct { /// Gets the namespace that this Decl creates by being a struct, union, /// enum, or opaque. /// Only returns it if the Decl is the owner. - pub fn getInnerNamespace(decl: *Decl) ?*Namespace { - if (!decl.owns_tv) return null; - const ty = (decl.val.castTag(.ty) orelse return null).data; - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return &struct_obj.namespace; - }, - .enum_full, .enum_nonexhaustive => { - const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; - return &enum_obj.namespace; - }, - .empty_struct => { - return ty.castTag(.empty_struct).?.data; - }, - .@"opaque" => { - const opaque_obj = ty.cast(Type.Payload.Opaque).?.data; - return &opaque_obj.namespace; - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - return &union_obj.namespace; - }, + pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex { + if (!decl.owns_tv) return .none; + if (decl.val.ip_index == .none) { + const ty = (decl.val.castTag(.ty) orelse return .none).data; + switch (ty.tag()) { + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + return struct_obj.namespace.toOptional(); + }, + .enum_full, .enum_nonexhaustive => { + const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; + return enum_obj.namespace.toOptional(); + }, + .empty_struct => { + @panic("TODO"); + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + return union_obj.namespace.toOptional(); + }, - else => return null, + else => return .none, + } } + return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + else => .none, + }; + } + + /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. + pub fn getInnerNamespace(decl: *Decl, mod: *Module) ?*Namespace { + return if (getInnerNamespaceIndex(decl, mod).unwrap()) |i| mod.namespacePtr(i) else null; } pub fn dump(decl: *Decl) void { @@ -920,8 +932,8 @@ pub const Decl = struct { std.debug.print("\n", .{}); } - pub fn getFileScope(decl: Decl) *File { - return decl.src_namespace.file_scope; + pub fn getFileScope(decl: Decl, mod: *Module) *File { + return mod.namespacePtr(decl.src_namespace).file_scope; } pub fn removeDependant(decl: *Decl, other: Decl.Index) void { @@ -974,7 +986,7 @@ pub const ErrorSet = struct { pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1000,7 +1012,7 @@ pub const Struct = struct { /// Set of field names in declaration order. fields: Fields, /// Represents the declarations inside this struct. - namespace: Namespace, + namespace: Namespace.Index, /// The Decl that corresponds to the struct itself. owner_decl: Decl.Index, /// Index of the struct_decl ZIR instruction. 
@@ -1101,7 +1113,7 @@ pub const Struct = struct { pub fn srcLoc(s: Struct, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(s.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1110,7 +1122,7 @@ pub const Struct = struct { pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(s.owner_decl); - const file = owner_decl.getFileScope(); + const file = owner_decl.getFileScope(mod); const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ @@ -1224,7 +1236,7 @@ pub const EnumSimple = struct { pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1253,7 +1265,7 @@ pub const EnumNumbered = struct { pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1275,7 +1287,7 @@ pub const EnumFull = struct { /// If this hash map is empty, it means the enum tags are auto-numbered. values: ValueMap, /// Represents the declarations inside this enum. - namespace: Namespace, + namespace: Namespace.Index, /// true if zig inferred this tag type, false if user specified it tag_ty_inferred: bool, @@ -1285,7 +1297,7 @@ pub const EnumFull = struct { pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1294,7 +1306,7 @@ pub const EnumFull = struct { pub fn fieldSrcLoc(e: EnumFull, mod: *Module, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(e.owner_decl); - const file = owner_decl.getFileScope(); + const file = owner_decl.getFileScope(mod); const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ @@ -1323,7 +1335,7 @@ pub const Union = struct { /// Set of field names in declaration order. fields: Fields, /// Represents the declarations inside this union. - namespace: Namespace, + namespace: Namespace.Index, /// The Decl that corresponds to the union itself. owner_decl: Decl.Index, /// Index of the union_decl ZIR instruction. 
@@ -1371,7 +1383,7 @@ pub const Union = struct { pub fn srcLoc(self: Union, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1380,7 +1392,7 @@ pub const Union = struct { pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(u.owner_decl); - const file = owner_decl.getFileScope(); + const file = owner_decl.getFileScope(mod); const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ @@ -1563,26 +1575,6 @@ pub const Union = struct { } }; -pub const Opaque = struct { - /// The Decl that corresponds to the opaque itself. - owner_decl: Decl.Index, - /// Represents the declarations inside this opaque. - namespace: Namespace, - - pub fn srcLoc(self: Opaque, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - pub fn getFullyQualifiedName(s: *Opaque, mod: *Module) ![:0]u8 { - return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); - } -}; - /// Some extern function struct memory is owned by the Decl's TypedValue.Managed /// arena allocator. pub const ExternFn = struct { @@ -1759,7 +1751,7 @@ pub const Fn = struct { } pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { - const file = mod.declPtr(func.owner_decl).getFileScope(); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); const tags = file.zir.instructions.items(.tag); @@ -1774,7 +1766,7 @@ pub const Fn = struct { } pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 { - const file = mod.declPtr(func.owner_decl).getFileScope(); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); const tags = file.zir.instructions.items(.tag); const data = file.zir.instructions.items(.data); @@ -1797,7 +1789,7 @@ pub const Fn = struct { pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool { const owner_decl = mod.declPtr(func.owner_decl); - const zir = owner_decl.getFileScope().zir; + const zir = owner_decl.getFileScope(mod).zir; const zir_tags = zir.instructions.items(.tag); switch (zir_tags[func.zir_body_inst]) { .func => return false, @@ -1851,7 +1843,7 @@ pub const DeclAdapter = struct { /// The container that structs, enums, unions, and opaques have. pub const Namespace = struct { - parent: ?*Namespace, + parent: OptionalIndex, file_scope: *File, /// Will be a struct, enum, union, or opaque. ty: Type, @@ -1869,6 +1861,28 @@ pub const Namespace = struct { /// Value is whether the usingnamespace decl is marked `pub`. 
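+    /// The key is the `usingnamespace` Decl itself.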
usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{}, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + const DeclContext = struct { module: *Module, @@ -1955,10 +1969,10 @@ pub const Namespace = struct { name: []const u8, writer: anytype, ) @TypeOf(writer).Error!void { - if (ns.parent) |parent| { - const decl_index = ns.getDeclIndex(); + if (ns.parent.unwrap()) |parent| { + const decl_index = ns.getDeclIndex(mod); const decl = mod.declPtr(decl_index); - try parent.renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); + try mod.namespacePtr(parent).renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); } else { try ns.file_scope.renderFullyQualifiedName(writer); } @@ -1976,10 +1990,10 @@ pub const Namespace = struct { writer: anytype, ) @TypeOf(writer).Error!void { var separator_char: u8 = '.'; - if (ns.parent) |parent| { - const decl_index = ns.getDeclIndex(); + if (ns.parent.unwrap()) |parent| { + const decl_index = ns.getDeclIndex(mod); const decl = mod.declPtr(decl_index); - try parent.renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); + try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); } else { try ns.file_scope.renderFullyQualifiedDebugName(writer); separator_char = ':'; @@ -1990,8 +2004,8 @@ pub const Namespace = struct { } } - pub fn getDeclIndex(ns: Namespace) Decl.Index { - return ns.ty.getOwnerDecl(); + pub fn getDeclIndex(ns: Namespace, mod: *Module) Decl.Index { + return ns.ty.getOwnerDecl(mod); } }; @@ -3320,7 +3334,7 @@ pub const LazySrcLoc = union(enum) { } /// Upgrade to a `SrcLoc` based on the `Decl` provided. 
- pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl) SrcLoc { + pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl, mod: *Module) SrcLoc { return switch (lazy) { .unneeded, .entire_file, @@ -3328,7 +3342,7 @@ pub const LazySrcLoc = union(enum) { .token_abs, .node_abs, => .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = 0, .lazy = lazy, }, @@ -3394,7 +3408,7 @@ pub const LazySrcLoc = union(enum) { .for_input, .for_capture_from_input, => .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = lazy, }, @@ -3555,6 +3569,9 @@ pub fn deinit(mod: *Module) void { mod.global_assembly.deinit(gpa); mod.reference_table.deinit(gpa); + mod.namespaces_free_list.deinit(gpa); + mod.allocated_namespaces.deinit(gpa); + mod.string_literal_table.deinit(gpa); mod.string_literal_bytes.deinit(gpa); @@ -3575,8 +3592,9 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { gpa.free(kv.value); } if (decl.has_tv) { - if (decl.getInnerNamespace()) |namespace| { - namespace.destroyDecls(mod); + if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| { + mod.namespacePtr(i).destroyDecls(mod); + mod.destroyNamespace(i); } } decl.clearValues(mod); @@ -3596,16 +3614,21 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } } -pub fn declPtr(mod: *Module, decl_index: Decl.Index) *Decl { - return mod.allocated_decls.at(@enumToInt(decl_index)); +pub fn declPtr(mod: *Module, index: Decl.Index) *Decl { + return mod.allocated_decls.at(@enumToInt(index)); +} + +pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { + return mod.allocated_namespaces.at(@enumToInt(index)); } /// Returns true if and only if the Decl is the top level struct associated with a File. 
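+/// Equivalently: the Decl's namespace has no parent and the Decl is that namespace's owner.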
pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { const decl = mod.declPtr(decl_index); - if (decl.src_namespace.parent != null) + const namespace = mod.namespacePtr(decl.src_namespace); + if (namespace.parent != .none) return false; - return decl_index == decl.src_namespace.getDeclIndex(); + return decl_index == namespace.getDeclIndex(mod); } fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void { @@ -4076,7 +4099,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { }; } - if (decl.getInnerNamespace()) |namespace| { + if (decl.getInnerNamespace(mod)) |namespace| { for (namespace.decls.keys()) |sub_decl| { try decl_stack.append(gpa, sub_decl); } @@ -4306,7 +4329,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to analyze: {s}", .{@errorName(e)}, )); @@ -4437,7 +4460,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "invalid liveness: {s}", .{@errorName(err)}, ), @@ -4460,7 +4483,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -4586,13 +4609,13 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .status = .none, .known_non_opv = undefined, .is_tuple = undefined, // set below - .namespace = .{ - .parent = null, + .namespace = try mod.createNamespace(.{ + .parent = .none, .ty = struct_ty, .file_scope = file, - }, + }), }; - const new_decl_index = try mod.allocateNewDecl(&struct_obj.namespace, 0, null); + const new_decl_index = try mod.allocateNewDecl(struct_obj.namespace, 0, null); const new_decl = mod.declPtr(new_decl_index); file.root_decl = new_decl_index.toOptional(); struct_obj.owner_decl = new_decl_index; @@ -4688,12 +4711,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { const decl = mod.declPtr(decl_index); - if (decl.getFileScope().status != .success_zir) { + if (decl.getFileScope(mod).status != .success_zir) { return error.AnalysisFail; } const gpa = mod.gpa; - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const zir_datas = zir.instructions.items(.data); decl.analysis = .in_progress; @@ -4767,7 +4790,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { block_scope.params.deinit(gpa); } - const zir_block_index = decl.zirBlockIndex(); + const zir_block_index = decl.zirBlockIndex(mod); const inst_data = zir_datas[zir_block_index].pl_node; const extra = zir.extraData(Zir.Inst.Block, inst_data.payload_index); const body = zir.extra[extra.end..][0..extra.data.body_len]; @@ -4792,7 +4815,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }); } const ty = try decl_tv.val.toType().copy(decl_arena_allocator); - if (ty.getNamespace() == null) { + if (ty.getNamespace(mod) == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } @@ -4895,12 +4918,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.ty = try decl_tv.ty.copy(decl_arena_allocator); decl.val = try decl_tv.val.copy(decl_arena_allocator); decl.@"align" = blk: { - 
const align_ref = decl.zirAlignRef(); + const align_ref = decl.zirAlignRef(mod); if (align_ref == .none) break :blk 0; break :blk try sema.resolveAlign(&block_scope, align_src, align_ref); }; decl.@"linksection" = blk: { - const linksection_ref = decl.zirLinksectionRef(); + const linksection_ref = decl.zirLinksectionRef(mod); if (linksection_ref == .none) break :blk null; const bytes = try sema.resolveConstString(&block_scope, section_src, linksection_ref, "linksection must be comptime-known"); if (mem.indexOfScalar(u8, bytes, 0) != null) { @@ -4921,7 +4944,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }; const target = sema.mod.getTarget(); - break :blk switch (decl.zirAddrspaceRef()) { + break :blk switch (decl.zirAddrspaceRef(mod)) { .none => switch (addrspace_ctx) { .function => target_util.defaultAddressSpace(target, .function), .variable => target_util.defaultAddressSpace(target, .global_mutable), @@ -5273,7 +5296,7 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void { pub fn scanNamespace( mod: *Module, - namespace: *Namespace, + namespace_index: Namespace.Index, extra_start: usize, decls_len: u32, parent_decl: *Decl, @@ -5282,6 +5305,7 @@ pub fn scanNamespace( defer tracy.end(); const gpa = mod.gpa; + const namespace = mod.namespacePtr(namespace_index); const zir = namespace.file_scope.zir; try mod.comp.work_queue.ensureUnusedCapacity(decls_len); @@ -5294,7 +5318,7 @@ pub fn scanNamespace( var decl_i: u32 = 0; var scan_decl_iter: ScanDeclIter = .{ .module = mod, - .namespace = namespace, + .namespace_index = namespace_index, .parent_decl = parent_decl, }; while (decl_i < decls_len) : (decl_i += 1) { @@ -5317,7 +5341,7 @@ pub fn scanNamespace( const ScanDeclIter = struct { module: *Module, - namespace: *Namespace, + namespace_index: Namespace.Index, parent_decl: *Decl, usingnamespace_index: usize = 0, comptime_index: usize = 0, @@ -5329,7 +5353,8 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err defer tracy.end(); const mod = iter.module; - const namespace = iter.namespace; + const namespace_index = iter.namespace_index; + const namespace = mod.namespacePtr(namespace_index); const gpa = mod.gpa; const zir = namespace.file_scope.zir; @@ -5404,7 +5429,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err ); const comp = mod.comp; if (!gop.found_existing) { - const new_decl_index = try mod.allocateNewDecl(namespace, decl_node, iter.parent_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace_index, decl_node, iter.parent_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; @@ -5456,7 +5481,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err const decl = mod.declPtr(decl_index); if (kind == .@"test") { const src_loc = SrcLoc{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = .{ .token_offset = 1 }, }; @@ -5564,7 +5589,7 @@ pub fn clearDecl( if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } - if (decl.getInnerNamespace()) |namespace| { + if (decl.getInnerNamespace(mod)) |namespace| { try namespace.deleteAllDecls(mod, outdated_decls); } } @@ -5584,7 +5609,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); assert(!mod.declIsRoot(decl_index)); - 
assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
+    assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index));
 
     const dependants = decl.dependants.keys();
     for (dependants) |dep| {
@@ -5612,7 +5637,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
     log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name });
     assert(!mod.declIsRoot(decl_index));
 
-    assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
+    assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index));
 
     // An aborted decl must not have dependants -- they must have
     // been aborted first and removed from this list.
@@ -5689,7 +5714,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
         .gpa = gpa,
         .arena = arena,
         .perm_arena = decl_arena_allocator,
-        .code = decl.getFileScope().zir,
+        .code = decl.getFileScope(mod).zir,
         .owner_decl = decl,
         .owner_decl_index = decl_index,
         .func = func,
@@ -5920,9 +5945,37 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
     decl.analysis = .outdated;
 }
 
+pub const CreateNamespaceOptions = struct {
+    parent: Namespace.OptionalIndex,
+    file_scope: *File,
+    ty: Type,
+};
+
+pub fn createNamespace(mod: *Module, options: CreateNamespaceOptions) !Namespace.Index {
+    // Reuse a free-list slot if available; either way, (re)initialize it from `options`.
+    const index = mod.namespaces_free_list.popOrNull() orelse blk: {
+        _ = try mod.allocated_namespaces.addOne(mod.gpa);
+        break :blk @intToEnum(Namespace.Index, mod.allocated_namespaces.len - 1);
+    };
+    mod.namespacePtr(index).* = .{
+        .parent = options.parent,
+        .file_scope = options.file_scope,
+        .ty = options.ty,
+    };
+    return index;
+}
+
+pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
+    mod.namespacePtr(index).* = undefined;
+    mod.namespaces_free_list.append(mod.gpa, index) catch {
+        // In order to keep `destroyNamespace` a non-fallible function, we ignore memory
+        // allocation failures here, instead leaking the Namespace until garbage collection.
+    };
+}
+
 pub fn allocateNewDecl(
     mod: *Module,
-    namespace: *Namespace,
+    namespace: Namespace.Index,
     src_node: Ast.Node.Index,
     src_scope: ?*CaptureScope,
 ) !Decl.Index {
@@ -6004,7 +6054,7 @@ pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedV
 pub fn createAnonymousDeclFromDecl(
     mod: *Module,
     src_decl: *Decl,
-    namespace: *Namespace,
+    namespace: Namespace.Index,
     src_scope: ?*CaptureScope,
     tv: TypedValue,
 ) !Decl.Index {
@@ -6022,7 +6072,7 @@ pub fn initNewAnonDecl(
     mod: *Module,
     new_decl_index: Decl.Index,
     src_line: u32,
-    namespace: *Namespace,
+    namespace: Namespace.Index,
     typed_value: TypedValue,
     name: [:0]u8,
 ) !void {
@@ -6040,7 +6090,7 @@ pub fn initNewAnonDecl(
     new_decl.analysis = .complete;
     new_decl.generation = mod.generation;
 
-    try namespace.anon_decls.putNoClobber(mod.gpa, new_decl_index, {});
+    try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {});
 
     // The Decl starts off with alive=false and the codegen backend will set alive=true
     // if the Decl is referenced by an instruction or another constant. Otherwise,
@@ -6110,16 +6160,17 @@ pub const SwitchProngSrc = union(enum) {
     /// the LazySrcLoc in order to emit a compile error.
     pub fn resolve(
         prong_src: SwitchProngSrc,
-        gpa: Allocator,
+        mod: *Module,
         decl: *Decl,
         switch_node_offset: i32,
         range_expand: RangeExpand,
     ) LazySrcLoc {
         @setCold(true);
-        const tree = decl.getFileScope().getTree(gpa) catch |err| {
+        const gpa = mod.gpa;
+        const tree = decl.getFileScope(mod).getTree(gpa) catch |err| {
            // In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6203,11 +6254,12 @@ pub const PeerTypeCandidateSrc = union(enum) { pub fn resolve( self: PeerTypeCandidateSrc, - gpa: Allocator, + mod: *Module, decl: *Decl, candidate_i: usize, ) ?LazySrcLoc { @setCold(true); + const gpa = mod.gpa; switch (self) { .none => { @@ -6229,10 +6281,10 @@ pub const PeerTypeCandidateSrc = union(enum) { else => {}, } - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6291,15 +6343,16 @@ fn queryFieldSrc( pub fn paramSrc( func_node_offset: i32, - gpa: Allocator, + mod: *Module, decl: *Decl, param_i: usize, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6321,19 +6374,20 @@ pub fn paramSrc( } pub fn argSrc( + mod: *Module, call_node_offset: i32, - gpa: Allocator, decl: *Decl, start_arg_i: usize, bound_arg_src: ?LazySrcLoc, ) LazySrcLoc { + @setCold(true); + const gpa = mod.gpa; if (start_arg_i == 0 and bound_arg_src != null) return bound_arg_src.?; const arg_i = start_arg_i - @boolToInt(bound_arg_src != null); - @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6347,7 +6401,7 @@ pub fn argSrc( const node_datas = tree.nodes.items(.data); const call_args_node = tree.extra_data[node_datas[node].rhs - 1]; const call_args_offset = decl.nodeIndexToRelative(call_args_node); - return initSrc(call_args_offset, gpa, decl, arg_i); + return mod.initSrc(call_args_offset, decl, arg_i); }, else => unreachable, }; @@ -6355,16 +6409,17 @@ pub fn argSrc( } pub fn initSrc( + mod: *Module, init_node_offset: i32, - gpa: Allocator, decl: *Decl, init_index: usize, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6400,12 +6455,13 @@ pub fn initSrc( } } -pub fn optionsSrc(gpa: Allocator, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { +pub fn optionsSrc(mod: *Module, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6471,7 +6527,10 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { // Remove from the namespace it resides in, preserving declaration order. assert(decl.zir_decl_index != 0); - _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{ .mod = mod }); + _ = mod.namespacePtr(decl.src_namespace).decls.orderedRemoveAdapted( + @as([]const u8, mem.sliceTo(decl.name, 0)), + DeclAdapter{ .mod = mod }, + ); try mod.clearDecl(decl_index, &outdated_decls); mod.destroyDecl(decl_index); @@ -6541,8 +6600,11 @@ pub fn populateTestFunctions( const builtin_pkg = mod.main_pkg.table.get("builtin").?; const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); - const builtin_namespace = root_decl.src_namespace; - const decl_index = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{ .mod = mod }).?; + const builtin_namespace = mod.namespacePtr(root_decl.src_namespace); + const decl_index = builtin_namespace.decls.getKeyAdapted( + @as([]const u8, "test_functions"), + DeclAdapter{ .mod = mod }, + ).?; { // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. 
@@ -6673,7 +6735,7 @@ pub fn linkerUpdateDecl(mod: *Module, decl_index: Decl.Index) !void { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -7138,3 +7200,24 @@ pub fn atomicPtrAlignment( return 0; } + +pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc { + const owner_decl = mod.declPtr(opaque_type.decl); + return .{ + .file_scope = owner_decl.getFileScope(mod), + .parent_decl_node = owner_decl.src_node, + .lazy = LazySrcLoc.nodeOffset(0), + }; +} + +pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) ![:0]u8 { + return mod.declPtr(opaque_type.decl).getFullyQualifiedName(mod); +} + +pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File { + return mod.declPtr(decl_index).getFileScope(mod); +} + +pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index { + return mod.namespacePtr(namespace_index).getDeclIndex(mod); +} diff --git a/src/Sema.zig b/src/Sema.zig index 3d673246732e..35440395c431 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -227,7 +227,7 @@ pub const Block = struct { sema: *Sema, /// The namespace to use for lookups from this source block /// When analyzing fields, this is different from src_decl.src_namespace. - namespace: *Namespace, + namespace: Namespace.Index, /// The AIR instructions generated for this block. instructions: std.ArrayListUnmanaged(Air.Inst.Index), // `param` instructions are collected here to be used by the `func` instruction. @@ -286,6 +286,7 @@ pub const Block = struct { fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void { const parent = msg orelse return; + const mod = sema.mod; const prefix = "expression is evaluated at comptime because "; switch (cr) { .c_import => |ci| { @@ -293,12 +294,12 @@ pub const Block = struct { }, .comptime_ret_ty => |rt| { const src_loc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| blk: { - var src_loc = fn_decl.srcLoc(); + var src_loc = fn_decl.srcLoc(mod); src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 }; break :blk src_loc; } else blk: { const src_decl = sema.mod.declPtr(rt.block.src_decl); - break :blk rt.func_src.toSrcLoc(src_decl); + break :blk rt.func_src.toSrcLoc(src_decl, mod); }; if (rt.return_ty.isGenericPoison()) { return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); @@ -399,8 +400,8 @@ pub const Block = struct { }; } - pub fn getFileScope(block: *Block) *Module.File { - return block.namespace.file_scope; + pub fn getFileScope(block: *Block, mod: *Module) *Module.File { + return mod.namespacePtr(block.namespace).file_scope; } fn addTy( @@ -876,6 +877,7 @@ fn analyzeBodyInner( wip_captures.deinit(); }; + const mod = sema.mod; const map = &sema.inst_map; const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); @@ -896,7 +898,7 @@ fn analyzeBodyInner( crash_info.setBodyIndex(i); const inst = body[i]; std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ - sema.mod.declPtr(block.src_decl).src_namespace.file_scope.sub_file_path, inst, + mod.namespacePtr(mod.declPtr(block.src_decl).src_namespace).file_scope.sub_file_path, inst, }); const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off @@ -1574,7 +1576,6 @@ fn analyzeBodyInner( }, .condbr => blk: { if (!block.is_comptime) break 
sema.zirCondbr(block, inst); - const mod = sema.mod; // Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220 const inst_data = datas[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; @@ -1597,7 +1598,6 @@ fn analyzeBodyInner( } }, .condbr_inline => blk: { - const mod = sema.mod; const inst_data = datas[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); @@ -1622,7 +1622,6 @@ fn analyzeBodyInner( }, .@"try" => blk: { if (!block.is_comptime) break :blk try sema.zirTry(block, inst); - const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -1632,7 +1631,7 @@ fn analyzeBodyInner( const err_union_ty = sema.typeOf(err_union); if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); @@ -1654,7 +1653,6 @@ fn analyzeBodyInner( }, .try_ptr => blk: { if (!block.is_comptime) break :blk try sema.zirTryPtr(block, inst); - const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -1713,7 +1711,7 @@ fn analyzeBodyInner( const noreturn_inst = block.instructions.popOrNull(); while (dbg_block_begins > 0) { dbg_block_begins -= 1; - if (block.is_comptime or sema.mod.comp.bin_file.options.strip) continue; + if (block.is_comptime or mod.comp.bin_file.options.strip) continue; _ = try block.addInst(.{ .tag = .dbg_block_end, @@ -2172,7 +2170,7 @@ fn errNote( ) error{OutOfMemory}!void { const mod = sema.mod; const src_decl = mod.declPtr(block.src_decl); - return mod.errNoteNonLazy(src.toSrcLoc(src_decl), parent, format, args); + return mod.errNoteNonLazy(src.toSrcLoc(src_decl, mod), parent, format, args); } fn addFieldErrNote( @@ -2185,19 +2183,19 @@ fn addFieldErrNote( ) !void { @setCold(true); const mod = sema.mod; - const decl_index = container_ty.getOwnerDecl(); + const decl_index = container_ty.getOwnerDecl(mod); const decl = mod.declPtr(decl_index); const field_src = blk: { - const tree = decl.getFileScope().getTree(sema.gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(sema.gpa) catch |err| { log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); - break :blk decl.srcLoc(); + break :blk decl.srcLoc(mod); }; const container_node = decl.relativeToNodeIndex(0); const node_tags = tree.nodes.items(.tag); var buf: [2]std.zig.Ast.Node.Index = undefined; - const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(); + const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(mod); var it_index: usize = 0; for (container_decl.ast.members) |member_node| { @@ -2207,7 +2205,7 @@ fn addFieldErrNote( .container_field, => { if (it_index == field_index) { - break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node)); + break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node), mod); } it_index += 1; }, @@ -2228,7 +2226,7 @@ fn errMsg( ) error{OutOfMemory}!*Module.ErrorMsg { const mod = sema.mod; const 
src_decl = mod.declPtr(block.src_decl); - return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl), format, args); + return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl, mod), format, args); } pub fn fail( @@ -2287,7 +2285,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { if (gop.found_existing) break; if (cur_reference_trace < max_references) { const decl = sema.mod.declPtr(ref.referencer); - try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl) }); + try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl, mod) }); } referenced_by = ref.referencer; } @@ -2664,7 +2662,7 @@ pub fn analyzeStructDecl( } } - _ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl); + _ = try sema.mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl); } fn zirStructDecl( @@ -2702,15 +2700,12 @@ fn zirStructDecl( .status = .none, .known_non_opv = undefined, .is_tuple = small.is_tuple, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = struct_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; - std.log.scoped(.module).debug("create struct {*} owned by {*} ({s})", .{ - &struct_obj.namespace, new_decl, new_decl.name, - }); try sema.analyzeStructDecl(new_decl, inst, struct_obj); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); @@ -2887,15 +2882,12 @@ fn zirEnumDecl( .tag_ty_inferred = true, .fields = .{}, .values = .{}, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = enum_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; - std.log.scoped(.module).debug("create enum {*} owned by {*} ({s})", .{ - &enum_obj.namespace, new_decl, new_decl.name, - }); try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); @@ -2905,7 +2897,7 @@ fn zirEnumDecl( const decl_arena_allocator = new_decl.value_arena.?.acquire(gpa, &decl_arena); defer new_decl.value_arena.?.release(&decl_arena); - extra_index = try mod.scanNamespace(&enum_obj.namespace, extra_index, decls_len, new_decl); + extra_index = try mod.scanNamespace(enum_obj.namespace, extra_index, decls_len, new_decl); const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; @@ -2944,7 +2936,7 @@ fn zirEnumDecl( .parent = null, .sema = sema, .src_decl = new_decl_index, - .namespace = &enum_obj.namespace, + .namespace = enum_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -3164,17 +3156,14 @@ fn zirUnionDecl( .zir_index = inst, .layout = small.layout, .status = .none, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = union_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; - std.log.scoped(.module).debug("create union {*} owned by {*} ({s})", .{ - &union_obj.namespace, new_decl, new_decl.name, - }); - _ = try mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl); + _ = try mod.scanNamespace(union_obj.namespace, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, 
new_decl_index); @@ -3208,37 +3197,37 @@ fn zirOpaqueDecl( var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); - const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); - opaque_ty_payload.* = .{ - .base = .{ .tag = .@"opaque" }, - .data = opaque_obj, - }; - const opaque_ty = Type.initPayload(&opaque_ty_payload.base); - const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); + // Because these three things each reference each other, `undefined` + // placeholders are used in two places before being set after the opaque + // type gains an InternPool index. + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = opaque_val, + .val = undefined, }, small.name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - opaque_obj.* = .{ - .owner_decl = new_decl_index, - .namespace = .{ - .parent = block.namespace, - .ty = opaque_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create opaque {*} owned by {*} ({s})", .{ - &opaque_obj.namespace, new_decl, new_decl.name, + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer @panic("TODO error handling"); + + const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ + .decl = new_decl_index, + .namespace = new_namespace_index, + } }); + errdefer @panic("TODO error handling"); + + new_decl.val = opaque_ty.toValue(); + new_namespace.ty = opaque_ty.toType(); - extra_index = try mod.scanNamespace(&opaque_obj.namespace, extra_index, decls_len, new_decl); + extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); @@ -4848,7 +4837,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), elem_ty); + try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -4870,7 +4859,7 @@ fn failWithBadMemberAccess( .Enum => "enum", else => unreachable, }; - if (agg_ty.getOwnerDeclOrNull()) |some| if (sema.mod.declIsRoot(some)) { + if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (sema.mod.declIsRoot(some)) { return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{ agg_ty.fmt(sema.mod), field_name, }); @@ -5632,7 +5621,7 @@ fn analyzeBlockBody( try sema.errNote(child_block, runtime_src, msg, "runtime control flow here", .{}); const child_src_decl = mod.declPtr(child_block.src_decl); - try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl), resolved_ty); + try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl, mod), resolved_ty); break :msg msg; }; @@ -5703,6 +5692,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = inst_data.src(); @@ -5711,7 +5701,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const decl_name = sema.code.nullTerminatedString(extra.decl_name); const decl_index = if (extra.namespace != .none) index_blk: { const container_ty = try sema.resolveType(block, operand_src, extra.namespace); - const container_namespace = container_ty.getNamespace().?; + const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?; const maybe_index = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false); break :index_blk maybe_index orelse @@ -5725,8 +5715,8 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void else => |e| return e, }; { - try sema.mod.ensureDeclAnalyzed(decl_index); - const exported_decl = sema.mod.declPtr(decl_index); + try mod.ensureDeclAnalyzed(decl_index); + const exported_decl = mod.declPtr(decl_index); if (exported_decl.val.castTag(.function)) |some| { return sema.analyzeExport(block, src, options, some.data.owner_decl); } @@ -5789,7 +5779,7 @@ pub fn analyzeExport( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), exported_decl.ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), exported_decl.ty, .other); try sema.addDeclaredHereNote(msg, exported_decl.ty); break :msg msg; @@ -6075,12 +6065,13 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !Decl.Index { + const mod = sema.mod; var namespace = block.namespace; while (true) { if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl_index| { return decl_index; } - namespace = namespace.parent orelse break; + namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break; } unreachable; // AstGen detects use of undeclared identifier errors. } @@ -6091,13 +6082,14 @@ fn lookupInNamespace( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, + namespace_index: Namespace.Index, ident_name: []const u8, observe_usingnamespace: bool, ) CompileError!?Decl.Index { const mod = sema.mod; - const namespace_decl_index = namespace.getDeclIndex(); + const namespace = mod.namespacePtr(namespace_index); + const namespace_decl_index = namespace.getDeclIndex(mod); const namespace_decl = sema.mod.declPtr(namespace_decl_index); if (namespace_decl.analysis == .file_failure) { try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index); @@ -6105,7 +6097,7 @@ fn lookupInNamespace( } if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) { - const src_file = block.namespace.file_scope; + const src_file = mod.namespacePtr(block.namespace).file_scope; const gpa = sema.gpa; var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, bool) = .{}; @@ -6124,7 +6116,7 @@ fn lookupInNamespace( // Skip decls which are not marked pub, which are in a different // file than the `a.b`/`@hasDecl` syntax. 
const decl = mod.declPtr(decl_index); - if (decl.is_pub or (src_file == decl.getFileScope() and checked_namespaces.values()[check_i])) { + if (decl.is_pub or (src_file == decl.getFileScope(mod) and checked_namespaces.values()[check_i])) { try candidates.append(gpa, decl_index); } } @@ -6135,15 +6127,15 @@ fn lookupInNamespace( if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue; const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index); const sub_is_pub = entry.value_ptr.*; - if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope()) { + if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope(mod)) { // Skip usingnamespace decls which are not marked pub, which are in // a different file than the `a.b`/`@hasDecl` syntax. continue; } try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index); const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data; - const sub_ns = ns_ty.getNamespace().?; - try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope()); + const sub_ns = ns_ty.getNamespace(mod).?; + try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod)); } } @@ -6171,7 +6163,7 @@ fn lookupInNamespace( errdefer msg.destroy(gpa); for (candidates.items) |candidate_index| { const candidate = mod.declPtr(candidate_index); - const src_loc = candidate.srcLoc(); + const src_loc = candidate.srcLoc(mod); try mod.errNoteNonLazy(src_loc, msg, "declared here", .{}); } break :msg msg; @@ -6532,7 +6524,7 @@ fn checkCallArgumentCount( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6669,7 +6661,7 @@ fn analyzeCall( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6811,7 +6803,7 @@ fn analyzeCall( // than create a child one. 
const parent_zir = sema.code; const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - sema.code = fn_owner_decl.getFileScope().zir; + sema.code = fn_owner_decl.getFileScope(mod).zir; defer sema.code = parent_zir; try mod.declareDeclDependencyType(sema.owner_decl_index, module_fn.owner_decl, .function_body); @@ -6911,7 +6903,7 @@ fn analyzeCall( try sema.analyzeInlineCallArg( block, &child_block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src), inst, new_fn_info, &arg_i, @@ -7098,7 +7090,7 @@ fn analyzeCall( const decl = sema.mod.declPtr(block.src_decl); _ = try sema.analyzeCallArg( block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), param_ty, uncasted_arg, opts, @@ -7114,7 +7106,7 @@ fn analyzeCall( _ = try sema.coerceVarArgParam( block, uncasted_arg, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), ); unreachable; }, @@ -7406,7 +7398,8 @@ fn instantiateGenericCall( // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - const namespace = fn_owner_decl.src_namespace; + const namespace_index = fn_owner_decl.src_namespace; + const namespace = mod.namespacePtr(namespace_index); const fn_zir = namespace.file_scope.zir; const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); const zir_tags = fn_zir.instructions.items(.tag); @@ -7456,7 +7449,7 @@ fn instantiateGenericCall( const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[i]) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src); _ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[i]); unreachable; }, @@ -7519,9 +7512,9 @@ fn instantiateGenericCall( try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); // Create a Decl for the new function. 
- const src_decl_index = namespace.getDeclIndex(); + const src_decl_index = namespace.getDeclIndex(mod); const src_decl = mod.declPtr(src_decl_index); - const new_decl_index = try mod.allocateNewDecl(namespace, fn_owner_decl.src_node, src_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); // TODO better names for generic function instantiations const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ @@ -7559,7 +7552,7 @@ fn instantiateGenericCall( uncasted_args, module_fn, new_module_func, - namespace, + namespace_index, func_ty_info, call_src, bound_arg_src, @@ -7631,7 +7624,7 @@ fn instantiateGenericCall( const decl = sema.mod.declPtr(block.src_decl); _ = try sema.analyzeGenericCallArg( block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, total_i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, total_i, bound_arg_src), uncasted_args[total_i], comptime_args[total_i], runtime_args, @@ -7692,7 +7685,7 @@ fn resolveGenericInstantiationType( uncasted_args: []const Air.Inst.Ref, module_fn: *Module.Fn, new_module_func: *Module.Fn, - namespace: *Namespace, + namespace: Namespace.Index, func_ty_info: Type.Payload.Function.Data, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, @@ -7779,7 +7772,7 @@ fn resolveGenericInstantiationType( const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known"); unreachable; }, @@ -8987,7 +8980,7 @@ fn funcCommon( const decl = sema.mod.declPtr(block.src_decl); try sema.analyzeParameter( block, - Module.paramSrc(src_node_offset, sema.gpa, decl, i), + Module.paramSrc(src_node_offset, mod, decl, i), param, comptime_params, i, @@ -9050,7 +9043,7 @@ fn funcCommon( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl), return_type, .ret_ty); + try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty); try sema.addDeclaredHereNote(msg, return_type); break :msg msg; @@ -9070,7 +9063,7 @@ fn funcCommon( "function with comptime-only return type '{}' requires all parameters to be comptime", .{return_type.fmt(sema.mod)}, ); - try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl), return_type); + try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type); const tags = sema.code.instructions.items(.tag); const data = sema.code.instructions.items(.data); @@ -9278,7 +9271,7 @@ fn analyzeParameter( errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl), param.ty, .param_ty); + try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param.ty, .param_ty); try sema.addDeclaredHereNote(msg, param.ty); break :msg msg; @@ -9293,7 +9286,7 @@ fn analyzeParameter( errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl), param.ty); + try 
sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param.ty); try sema.addDeclaredHereNote(msg, param.ty); break :msg msg; @@ -10233,15 +10226,15 @@ fn zirSwitchCapture( if (!field.ty.eql(first_field.ty, sema.mod)) { const msg = msg: { const raw_capture_src = Module.SwitchProngSrc{ .multi_capture = capture_info.prong_index }; - const capture_src = raw_capture_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const capture_src = raw_capture_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{}); errdefer msg.destroy(sema.gpa); const raw_first_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 0 } }; - const first_item_src = raw_first_item_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const first_item_src = raw_first_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); const raw_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 1 + @intCast(u32, i) } }; - const item_src = raw_item_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const item_src = raw_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(sema.mod)}); try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(sema.mod)}); break :msg msg; @@ -11265,7 +11258,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError error.NeededSourceLocation => { const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } }; const decl = mod.declPtr(case_block.src_decl); - try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none)); + try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none)); unreachable; }, else => return err, @@ -11301,7 +11294,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError error.NeededSourceLocation => { const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }; const decl = mod.declPtr(case_block.src_decl); - try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none)); + try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none)); unreachable; }, else => return err, @@ -11724,6 +11717,7 @@ fn resolveSwitchItemVal( switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, ) CompileError!TypedValue { + const mod = sema.mod; const item = try sema.resolveInst(item_ref); const item_ty = sema.typeOf(item); // Constructing a LazySrcLoc is costly because we only have the switch AST node. 
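Much of the Sema.zig churn around this point touches call sites of one idiom that is easy to miss in the noise: a helper is first invoked with a cheap `.unneeded` source location, and only when it actually needs to emit a compile error does it fail with `error.NeededSourceLocation`, at which point the caller pays for the expensive lookup (`SwitchProngSrc.resolve`, `argSrc`, `initSrc`) and retries. A standalone sketch of the idiom, with hypothetical names (`checkPositive` and `expensivePreciseLoc` are illustrative, not from the patch):

const std = @import("std");

const SrcLoc = union(enum) {
    unneeded,
    byte_offset: u32,
};

const Error = error{ NeededSourceLocation, AnalysisFail };

// When handed the cheap placeholder, fail with `error.NeededSourceLocation`
// instead of reporting; with a real location, report and fail for real.
fn checkPositive(value: i32, src: SrcLoc) Error!void {
    if (value > 0) return; // fast path: no location ever materialized
    switch (src) {
        .unneeded => return error.NeededSourceLocation,
        .byte_offset => |off| {
            std.debug.print("error at byte {d}: value must be positive\n", .{off});
            return error.AnalysisFail;
        },
    }
}

// Stand-in for `SwitchProngSrc.resolve`: expensive (the real one may re-parse
// a file), so it is only called on the error path.
fn expensivePreciseLoc(index: usize) SrcLoc {
    return .{ .byte_offset = @intCast(u32, index * 100) };
}

fn checkAll(values: []const i32) Error!void {
    for (values, 0..) |value, i| {
        checkPositive(value, .unneeded) catch |err| switch (err) {
            error.NeededSourceLocation => {
                // Retry with the precise location; this always fails.
                try checkPositive(value, expensivePreciseLoc(i));
                unreachable;
            },
            else => |e| return e,
        };
    }
}

test "retry attaches the precise location" {
    try checkAll(&.{ 1, 2, 3 });
    try std.testing.expectError(error.AnalysisFail, checkAll(&.{ 1, -2, 3 }));
}

The `unreachable` after the retry mirrors the call sites in this patch: the second call exists only to attach a precise location to a failure that has already been decided, so it never returns normally.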
@@ -11734,7 +11728,7 @@ fn resolveSwitchItemVal( return TypedValue{ .ty = item_ty, .val = val }; } else |err| switch (err) { error.NeededSourceLocation => { - const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand); + const src = switch_prong_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand); _ = try sema.resolveConstValue(block, src, item, "switch prong values must be comptime-known"); unreachable; }, @@ -11752,10 +11746,11 @@ fn validateSwitchRange( src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { + const mod = sema.mod; const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; - if (first_val.compareScalar(.gt, last_val, operand_ty, sema.mod)) { - const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), src_node_offset, .first); + if (first_val.compareScalar(.gt, last_val, operand_ty, mod)) { + const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first); return sema.fail(block, src, "range start value is greater than the end value", .{}); } const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src); @@ -11821,10 +11816,10 @@ fn validateSwitchDupe( src_node_offset: i32, ) CompileError!void { const prev_prong_src = maybe_prev_src orelse return; - const gpa = sema.gpa; + const mod = sema.mod; const block_src_decl = sema.mod.declPtr(block.src_decl); - const src = switch_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none); - const prev_src = prev_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none); + const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); + const prev_src = prev_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); const msg = msg: { const msg = try sema.errMsg( block, @@ -11863,7 +11858,7 @@ fn validateSwitchItemBool( } if (true_count.* + false_count.* > 2) { const block_src_decl = sema.mod.declPtr(block.src_decl); - const src = switch_prong_src.resolve(sema.gpa, block_src_decl, src_node_offset, .none); + const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); return sema.fail(block, src, "duplicate switch value", .{}); } } @@ -12068,6 +12063,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -12078,10 +12074,11 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air try sema.checkNamespaceType(block, lhs_src, container_type); - const namespace = container_type.getNamespace() orelse return Air.Inst.Ref.bool_false; + const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse + return Air.Inst.Ref.bool_false; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { - const decl = sema.mod.declPtr(decl_index); - if (decl.is_pub or decl.getFileScope() == block.getFileScope()) { + const decl = mod.declPtr(decl_index); + if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) { return 
Air.Inst.Ref.bool_true; } } @@ -12097,12 +12094,12 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const operand_src = inst_data.src(); const operand = inst_data.get(sema.code); - const result = mod.importFile(block.getFileScope(), operand) catch |err| switch (err) { + const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) { error.ImportOutsidePkgPath => { return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand}); }, error.PackageNotFound => { - const name = try block.getFileScope().pkg.getName(sema.gpa, mod.*); + const name = try block.getFileScope(mod).pkg.getName(sema.gpa, mod.*); defer sema.gpa.free(name); return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, name }); }, @@ -12128,7 +12125,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const name = try sema.resolveConstString(block, operand_src, inst_data.operand, "file path name must be comptime-known"); - const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) { + const embed_file = mod.embedFile(block.getFileScope(mod), name) catch |err| switch (err) { error.ImportOutsidePkgPath => { return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name}); }, @@ -15666,7 +15663,8 @@ fn zirThis( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const this_decl_index = block.namespace.getDeclIndex(); + const mod = sema.mod; + const this_decl_index = mod.namespaceDeclIndex(block.namespace); const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); return sema.analyzeDeclVal(block, src, this_decl_index); } @@ -15698,9 +15696,10 @@ fn zirClosureGet( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; // TODO CLOSURE: Test this with inline functions const inst_data = sema.code.instructions.items(.data)[inst].inst_node; - var scope: *CaptureScope = sema.mod.declPtr(block.src_decl).src_scope.?; + var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?; // Note: The target closure must be in this scope list. // If it's not here, the zir is invalid, or the list is broken. const tv = while (true) { @@ -15725,8 +15724,8 @@ fn zirClosureGet( if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) { const msg = msg: { const name = name: { - const file = sema.owner_decl.getFileScope(); - const tree = file.getTree(sema.mod.gpa) catch |err| { + const file = sema.owner_decl.getFileScope(mod); + const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15753,8 +15752,8 @@ fn zirClosureGet( if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { const msg = msg: { const name = name: { - const file = sema.owner_decl.getFileScope(); - const tree = file.getTree(sema.mod.gpa) catch |err| { + const file = sema.owner_decl.getFileScope(mod); + const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15825,7 +15824,7 @@ fn zirBuiltinSrc( const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{}); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); + const fn_owner_decl = mod.declPtr(func.owner_decl); const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); @@ -15844,7 +15843,7 @@ fn zirBuiltinSrc( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); // The compiler must not call realpath anywhere. - const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); + const name = try fn_owner_decl.getFileScope(mod).fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), @@ -15980,22 +15979,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fn_info_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "Fn", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); try sema.ensureDeclAnalyzed(fn_info_decl_index); - const fn_info_decl = sema.mod.declPtr(fn_info_decl_index); + const fn_info_decl = mod.declPtr(fn_info_decl_index); const fn_ty = fn_info_decl.val.toType(); const param_info_decl_index = (try sema.namespaceLookup( block, src, - fn_ty.getNamespace().?, + fn_ty.getNamespaceIndex(mod).unwrap().?, "Param", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); try sema.ensureDeclAnalyzed(param_info_decl_index); - const param_info_decl = sema.mod.declPtr(param_info_decl_index); + const param_info_decl = mod.declPtr(param_info_decl_index); const param_ty = param_info_decl.val.toType(); const new_decl = try params_anon_decl.finish( try Type.Tag.array.create(params_anon_decl.arena(), .{ @@ -16169,12 +16168,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const set_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "Error", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); try sema.ensureDeclAnalyzed(set_field_ty_decl_index); - const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index); + const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); break :t try set_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; @@ -16277,12 +16276,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "EnumField", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); - const enum_field_ty_decl = 
sema.mod.declPtr(enum_field_ty_decl_index); + const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; @@ -16336,7 +16335,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :v try Value.Tag.decl_ref.create(sema.arena, new_decl); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(mod)); const field_values = try sema.arena.create([4]Value); field_values.* = .{ @@ -16368,12 +16367,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const union_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "UnionField", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); - const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index); + const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); break :t try union_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; @@ -16434,7 +16433,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace(mod)); const enum_tag_ty_val = if (union_ty.unionTagType()) |tag_ty| v: { const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty); @@ -16475,12 +16474,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const struct_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "StructField", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); - const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index); + const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); break :t try struct_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; const struct_ty = try sema.resolveTypeFields(ty); @@ -16597,7 +16596,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace(mod)); const backing_integer_val = blk: { if (layout == .Packed) { @@ -16640,7 +16639,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // TODO: look into memoizing this result. 
const opaque_ty = try sema.resolveTypeFields(ty); - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespace(mod)); const field_values = try sema.arena.create([1]Value); field_values.* = .{ @@ -16676,7 +16675,7 @@ fn typeInfoDecls( const declaration_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "Declaration", )).?; try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); @@ -16730,7 +16729,7 @@ fn typeInfoNamespaceDecls( if (decl.kind == .@"usingnamespace") { if (decl.analysis == .in_progress) continue; try mod.ensureDeclAnalyzed(decl_index); - const new_ns = decl.val.toType().getNamespace().?; + const new_ns = decl.val.toType().getNamespace(mod).?; try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); continue; } @@ -17750,7 +17749,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl), elem_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; @@ -18006,6 +18005,7 @@ fn finishStructInit( struct_ty: Type, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; var root_msg: ?*Module.ErrorMsg = null; @@ -18118,8 +18118,8 @@ fn finishStructInit( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(dest_src.node_offset.x, sema.gpa, decl, runtime_index); + const decl = mod.declPtr(block.src_decl); + const field_src = mod.initSrc(dest_src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, dest_src, field_src); unreachable; }, @@ -18158,11 +18158,11 @@ fn zirStructInitAnon( if (gop.found_existing) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); errdefer msg.destroy(sema.gpa); - const prev_source = Module.initSrc(src.node_offset.x, sema.gpa, decl, gop.value_ptr.*); + const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*); try sema.errNote(block, prev_source, msg, "other field here", .{}); break :msg msg; }; @@ -18175,7 +18175,7 @@ fn zirStructInitAnon( if (types[i].zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -18208,7 +18208,7 @@ fn zirStructInitAnon( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, runtime_index); + const field_src = 
mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, field_src); unreachable; }, @@ -18283,7 +18283,7 @@ fn zirArrayInit( resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const elem_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const elem_src = mod.initSrc(src.node_offset.x, decl, i); _ = try sema.coerce(block, elem_ty, resolved_arg, elem_src); unreachable; }, @@ -18315,7 +18315,7 @@ fn zirArrayInit( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const elem_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, runtime_index); + const elem_src = mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, elem_src); unreachable; }, @@ -18724,7 +18724,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air enum_ty.fmt(mod), }); } - const enum_decl_index = enum_ty.getOwnerDecl(); + const enum_decl_index = enum_ty.getOwnerDecl(mod); const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| { const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse { @@ -18734,7 +18734,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air val.fmtValue(enum_ty, sema.mod), enum_decl.name, }); errdefer msg.destroy(sema.gpa); - try mod.errNoteNonLazy(enum_decl.srcLoc(), msg, "declared here", .{}); + try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -18760,6 +18760,7 @@ fn zirReify( inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -18887,10 +18888,10 @@ fn zirReify( if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), elem_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; @@ -19043,7 +19044,6 @@ fn zirReify( return sema.fail(block, src, "reified enums must have no decls", .{}); } - const gpa = sema.gpa; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); @@ -19076,11 +19076,11 @@ fn zirReify( .tag_ty_inferred = false, .fields = .{}, .values = .{}, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = enum_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; // Enum tag type @@ -19164,34 +19164,37 @@ fn zirReify( return sema.fail(block, src, "reified opaque must have no decls", .{}); } - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + var 
new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); - const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); - opaque_ty_payload.* = .{ - .base = .{ .tag = .@"opaque" }, - .data = opaque_obj, - }; - const opaque_ty = Type.initPayload(&opaque_ty_payload.base); - const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); + // Because these three things each reference each other, + // `undefined` placeholders are used in two places before being set + // after the opaque type gains an InternPool index. + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = opaque_val, + .val = undefined, }, name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - opaque_obj.* = .{ - .owner_decl = new_decl_index, - .namespace = .{ - .parent = block.namespace, - .ty = opaque_ty, - .file_scope = block.getFileScope(), - }, - }; + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer @panic("TODO error handling"); + + const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ + .decl = new_decl_index, + .namespace = new_namespace_index, + } }); + errdefer @panic("TODO error handling"); + + new_decl.val = opaque_ty.toValue(); + new_namespace.ty = opaque_ty.toType(); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); @@ -19214,7 +19217,7 @@ fn zirReify( } const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout); - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); @@ -19248,11 +19251,11 @@ fn zirReify( .zir_index = inst, .layout = layout, .status = .have_field_types, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = union_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; // Tag type @@ -19301,7 +19304,7 @@ fn zirReify( if (!enum_has_field) { const msg = msg: { const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) }); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; @@ -19324,7 +19327,7 @@ fn zirReify( if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19334,10 +19337,10 @@ fn zirReify( if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = 
mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .union_field); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .union_field); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19346,10 +19349,10 @@ fn zirReify( } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty); + try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19362,7 +19365,7 @@ fn zirReify( if (names.count() > 0) { const msg = msg: { const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const enum_ty = union_obj.tag_ty; for (names.keys()) |field_name| { @@ -19513,11 +19516,11 @@ fn reifyStruct( .status = .have_field_types, .known_non_opv = false, .is_tuple = is_tuple, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = struct_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; // Fields @@ -19629,7 +19632,7 @@ fn reifyStruct( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .struct_field); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19641,7 +19644,7 @@ fn reifyStruct( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty); + try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19741,6 +19744,7 @@ fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.In } fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -19755,7 +19759,7 @@ fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), arg_ty, .param_ty); + try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), arg_ty, .param_ty); try sema.addDeclaredHereNote(msg, arg_ty); break :msg msg; @@ -21006,7 +21010,8 @@ fn checkVectorizableBinaryOperands( fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { if (base_src == .unneeded) return .unneeded; - return Module.optionsSrc(sema.gpa, sema.mod.declPtr(block.src_decl), base_src, wanted); + const mod = sema.mod; + return mod.optionsSrc(sema.mod.declPtr(block.src_decl), base_src, wanted); } fn resolveExportOptions( @@ -23067,7 
+23072,7 @@ fn zirBuiltinExtern( const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -23087,9 +23092,9 @@ fn zirBuiltinExtern( // TODO check duplicate extern - const new_decl_index = try sema.mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); - errdefer sema.mod.destroyDecl(new_decl_index); - const new_decl = sema.mod.declPtr(new_decl_index); + const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); + errdefer mod.destroyDecl(new_decl_index); + const new_decl = mod.declPtr(new_decl_index); new_decl.name = try sema.gpa.dupeZ(u8, options.name); { @@ -23117,12 +23122,12 @@ fn zirBuiltinExtern( new_decl.@"linksection" = null; new_decl.has_tv = true; new_decl.analysis = .complete; - new_decl.generation = sema.mod.generation; + new_decl.generation = mod.generation; try new_decl.finalizeNewArena(&new_decl_arena); } - try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); try sema.ensureDeclAnalyzed(new_decl_index); const ref = try Value.Tag.decl_ref.create(sema.arena, new_decl_index); @@ -23209,7 +23214,7 @@ fn validateVarType( const msg = try sema.errMsg(block, src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), var_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), var_ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -23222,7 +23227,7 @@ fn validateVarType( errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), var_ty); + try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), var_ty); if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) { try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}); } @@ -23939,11 +23944,12 @@ fn safetyPanic( block: *Block, panic_id: PanicId, ) CompileError!void { + const mod = sema.mod; const panic_messages_ty = try sema.getBuiltinType("panic_messages"); const msg_decl_index = (try sema.namespaceLookup( block, sema.src, - panic_messages_ty.getNamespace().?, + panic_messages_ty.getNamespaceIndex(mod).unwrap().?, @tagName(panic_id), )).?; @@ -24006,7 +24012,7 @@ fn fieldVal( ); } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = ptr_info.pointee_type.childType(mod), .sentinel = ptr_info.sentinel, .@"align" = ptr_info.@"align", @@ -24025,7 +24031,7 @@ fn fieldVal( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + .{ field_name, object_ty.fmt(mod) }, ); } }, @@ -24049,7 +24055,7 @@ fn fieldVal( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, 
+ .{ field_name, object_ty.fmt(mod) }, ); } } @@ -24071,14 +24077,14 @@ fn fieldVal( } const msg = msg: { const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(sema.mod), + field_name, child_type.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, child_type); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else (try sema.mod.getErrorValue(field_name)).key; + } else (try mod.getErrorValue(field_name)).key; return sema.addConstant( if (!child_type.isAnyError()) @@ -24089,7 +24095,7 @@ fn fieldVal( ); }, .Union => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } @@ -24107,7 +24113,7 @@ fn fieldVal( return sema.failWithBadMemberAccess(block, union_ty, field_name_src, field_name); }, .Enum => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } @@ -24119,7 +24125,7 @@ fn fieldVal( return sema.addConstant(try child_type.copy(arena), enum_val); }, .Struct, .Opaque => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } @@ -24128,7 +24134,7 @@ fn fieldVal( }, else => { const msg = msg: { - const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(mod)}); errdefer msg.destroy(sema.gpa); if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); @@ -24174,7 +24180,7 @@ fn fieldPtr( const object_ptr_ty = sema.typeOf(object_ptr); const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { .Pointer => object_ptr_ty.childType(mod), - else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}), + else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}), }; // Zig allows dereferencing a single pointer during field lookup. 
Note that @@ -24202,7 +24208,7 @@ fn fieldPtr( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + .{ field_name, object_ty.fmt(mod) }, ); } }, @@ -24218,7 +24224,7 @@ fn fieldPtr( const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); const slice_ptr_ty = inner_ty.slicePtrFieldType(buf, mod); - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = slice_ptr_ty, .mutable = attr_ptr_ty.ptrIsMutable(mod), .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), @@ -24239,7 +24245,7 @@ fn fieldPtr( return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); } else if (mem.eql(u8, field_name, "len")) { - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = Type.usize, .mutable = attr_ptr_ty.ptrIsMutable(mod), .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), @@ -24264,7 +24270,7 @@ fn fieldPtr( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + .{ field_name, object_ty.fmt(mod) }, ); } }, @@ -24287,9 +24293,9 @@ fn fieldPtr( break :blk entry.key_ptr.*; } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(sema.mod), + field_name, child_type.fmt(mod), }); - } else (try sema.mod.getErrorValue(field_name)).key; + } else (try mod.getErrorValue(field_name)).key; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -24303,7 +24309,7 @@ fn fieldPtr( )); }, .Union => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } @@ -24324,7 +24330,7 @@ fn fieldPtr( return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } @@ -24342,14 +24348,14 @@ fn fieldPtr( )); }, .Struct, .Opaque => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, - else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}), + else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}), } }, .Struct => { @@ -24398,7 +24404,7 @@ fn fieldCallBind( const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) raw_ptr_ty.childType(mod) else - return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)}); + return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)}); // Optionally dereference a second pointer to get the concrete type. const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One; @@ -24458,7 +24464,7 @@ fn fieldCallBind( // If we get here, we need to look for a decl in the struct type instead. 
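The member-access cases in these hunks repeat one mechanical transformation: `sema.mod` is bound once per function, and every `Type` helper that used to stand alone now takes it, because type data has moved behind the `InternPool`. A condensed sketch using the names from the surrounding hunks (assumed signatures, not a verbatim excerpt):

    const mod = sema.mod;
    if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| {
        if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
            return inst;
        }
    }
    // Formatting a type for an error message needs `mod` as well:
    return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)});
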
const found_decl = switch (concrete_ty.zigTypeTag(mod)) { .Struct, .Opaque, .Union, .Enum => found_decl: { - if (concrete_ty.getNamespace()) |namespace| { + if (concrete_ty.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| { try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); @@ -24472,7 +24478,7 @@ fn fieldCallBind( first_param_type.zigTypeTag(mod) == .Pointer and (first_param_type.ptrSize(mod) == .One or first_param_type.ptrSize(mod) == .C) and - first_param_type.childType(mod).eql(concrete_ty, sema.mod))) + first_param_type.childType(mod).eql(concrete_ty, mod))) { // zig fmt: on // Note that if the param type is generic poison, we know that it must @@ -24484,7 +24490,7 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = object_ptr, } }; - } else if (first_param_type.eql(concrete_ty, sema.mod)) { + } else if (first_param_type.eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, @@ -24492,7 +24498,7 @@ fn fieldCallBind( } }; } else if (first_param_type.zigTypeTag(mod) == .Optional) { const child = first_param_type.optionalChild(mod); - if (child.eql(concrete_ty, sema.mod)) { + if (child.eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, @@ -24500,7 +24506,7 @@ fn fieldCallBind( } }; } else if (child.zigTypeTag(mod) == .Pointer and child.ptrSize(mod) == .One and - child.childType(mod).eql(concrete_ty, sema.mod)) + child.childType(mod).eql(concrete_ty, mod)) { return .{ .method = .{ .func_inst = decl_val, @@ -24508,7 +24514,7 @@ fn fieldCallBind( } }; } } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and - first_param_type.errorUnionPayload().eql(concrete_ty, sema.mod)) + first_param_type.errorUnionPayload().eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ @@ -24526,12 +24532,12 @@ fn fieldCallBind( }; const msg = msg: { - const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(mod) }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); if (found_decl) |decl_idx| { - const decl = sema.mod.declPtr(decl_idx); - try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "'{s}' is not a member function", .{field_name}); + const decl = mod.declPtr(decl_idx); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{s}' is not a member function", .{field_name}); } break :msg msg; }; @@ -24549,7 +24555,7 @@ fn finishFieldCallBind( ) CompileError!ResolvedFieldCallee { const mod = sema.mod; const arena = sema.arena; - const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(arena, mod, .{ .pointee_type = field_ty, .mutable = ptr_ty.ptrIsMutable(mod), .@"addrspace" = ptr_ty.ptrAddressSpace(mod), @@ -24583,19 +24589,20 @@ fn namespaceLookup( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, + namespace: Namespace.Index, decl_name: []const u8, ) CompileError!?Decl.Index { + const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { - const decl = sema.mod.declPtr(decl_index); - if (!decl.is_pub 
and decl.getFileScope() != block.getFileScope()) { + const decl = mod.declPtr(decl_index); + if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ decl_name, }); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{}); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -24609,7 +24616,7 @@ fn namespaceLookupRef( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, + namespace: Namespace.Index, decl_name: []const u8, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; @@ -24621,7 +24628,7 @@ fn namespaceLookupVal( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, + namespace: Namespace.Index, decl_name: []const u8, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; @@ -24692,7 +24699,7 @@ fn structFieldPtrByIndex( .@"addrspace" = struct_ptr_ty_info.@"addrspace", }; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); if (struct_obj.layout == .Packed) { comptime assert(Type.packed_struct_layout_version == 2); @@ -24746,7 +24753,7 @@ fn structFieldPtrByIndex( ptr_ty_data.@"align" = field.abi_align; } - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); + const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data); if (field.is_comptime) { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ @@ -24848,16 +24855,17 @@ fn tupleFieldIndex( field_name: []const u8, field_name_src: LazySrcLoc, ) CompileError!u32 { + const mod = sema.mod; assert(!std.mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { if (field_index < tuple_ty.structFieldCount()) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ - field_name, tuple_ty.fmt(sema.mod), + field_name, tuple_ty.fmt(mod), }); } else |_| {} return sema.fail(block, field_name_src, "no field named '{s}' in tuple '{}'", .{ - field_name, tuple_ty.fmt(sema.mod), + field_name, tuple_ty.fmt(mod), }); } @@ -24913,7 +24921,7 @@ fn unionFieldPtr( const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(arena, mod, .{ .pointee_type = field.ty, .mutable = union_ptr_ty.ptrIsMutable(mod), .@"volatile" = union_ptr_ty.isVolatilePtr(mod), @@ -24947,7 +24955,7 @@ fn unionFieldPtr( .data = enum_field_index, }; const field_tag = Value.initPayload(&field_tag_buf.base); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); + const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); if (!tag_matches) { const msg = msg: { const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; @@ -25017,7 +25025,7 @@ fn unionFieldVal( .data = enum_field_index, }; const field_tag = Value.initPayload(&field_tag_buf.base); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); + const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); switch (union_obj.layout) { .Auto => { if (tag_matches) { @@ 
-25038,7 +25046,7 @@ fn unionFieldVal( if (tag_matches) { return sema.addConstant(field.ty, tag_and_val.val); } else { - const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod); + const old_ty = union_ty.unionFieldType(tag_and_val.tag, mod); if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| { return sema.addConstant(field.ty, new_val); } @@ -25079,7 +25087,7 @@ fn elemPtr( const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { .Pointer => indexable_ptr_ty.childType(mod), - else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), + else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}), }; try checkIndexable(sema, block, src, indexable_ty); @@ -25124,7 +25132,7 @@ fn elemPtrOneLayerOnly( const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); + const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, mod); const result_ty = try sema.elemPtrType(indexable_ty, index); return sema.addConstant(result_ty, elem_ptr); }; @@ -25170,7 +25178,7 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); + const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } @@ -25209,6 +25217,7 @@ fn validateRuntimeElemAccess( parent_ty: Type, parent_src: LazySrcLoc, ) CompileError!void { + const mod = sema.mod; const valid_rt = try sema.validateRunTimeType(elem_ty, false); if (!valid_rt) { const msg = msg: { @@ -25216,12 +25225,12 @@ fn validateRuntimeElemAccess( block, elem_index_src, "values of type '{}' must be comptime-known, but index value is runtime-known", - .{parent_ty.fmt(sema.mod)}, + .{parent_ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl), parent_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl, mod), parent_ty); break :msg msg; }; @@ -25255,7 +25264,7 @@ fn tupleFieldPtr( } const field_ty = tuple_ty.structFieldType(field_index); - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, .mutable = tuple_ptr_ty.ptrIsMutable(mod), .@"volatile" = tuple_ptr_ty.isVolatilePtr(mod), @@ -25431,7 +25440,7 @@ fn elemPtrArray( return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, sema.mod); + const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } @@ -25476,7 +25485,7 @@ fn elemValSlice( if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; - const slice_len = slice_val.sliceLen(sema.mod); + const slice_len = slice_val.sliceLen(mod); 
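Several hunks above are the same change to the comptime element-access path: the `Value`-level helpers gain a trailing `Module` parameter. A condensed sketch of that path, assembled from the call sites in this diff (assumed signatures):

    // Resolve a comptime-known index, compute the element pointer, and
    // dereference it at compile time; `mod` is threaded into each step.
    const index = @intCast(usize, index_val.toUnsignedInt(mod));
    const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, mod);
    if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| {
        return sema.addConstant(indexable_ty.elemType2(mod), elem_val);
    }
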
const slice_len_s = slice_len + @boolToInt(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -25487,7 +25496,7 @@ fn elemValSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } @@ -25500,7 +25509,7 @@ fn elemValSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)) + try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -25537,7 +25546,7 @@ fn elemPtrSlice( if (slice_val.isUndef()) { return sema.addConstUndef(elem_ptr_ty); } - const slice_len = slice_val.sliceLen(sema.mod); + const slice_len = slice_val.sliceLen(mod); const slice_len_s = slice_len + @boolToInt(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -25547,7 +25556,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25560,7 +25569,7 @@ fn elemPtrSlice( const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef()) - break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)); + break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -25602,16 +25611,17 @@ const CoerceOpts = struct { fn get(info: @This(), sema: *Sema) !?Module.SrcLoc { if (info.func_inst == .none) return null; + const mod = sema.mod; const fn_decl = (try sema.funcDeclSrc(info.func_inst)) orelse return null; - const param_src = Module.paramSrc(0, sema.gpa, fn_decl, info.param_i); + const param_src = Module.paramSrc(0, mod, fn_decl, info.param_i); if (param_src == .node_offset_param) { return Module.SrcLoc{ - .file_scope = fn_decl.getFileScope(), + .file_scope = fn_decl.getFileScope(mod), .parent_decl_node = fn_decl.src_node, .lazy = LazySrcLoc.nodeOffset(param_src.node_offset_param), }; } - return param_src.toSrcLoc(fn_decl); + return param_src.toSrcLoc(fn_decl, mod); } } = .{}, }; @@ -25625,13 +25635,13 @@ fn coerceExtra( opts: CoerceOpts, ) CoersionError!Air.Inst.Ref { if (dest_ty_unresolved.isGenericPoison()) return inst; + const mod = sema.mod; const dest_ty_src = inst_src; // TODO better source location const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst)); - const mod = sema.mod; - const target = sema.mod.getTarget(); + const 
target = mod.getTarget(); // If the types are the same, we can return the operand. - if (dest_ty.eql(inst_ty, sema.mod)) + if (dest_ty.eql(inst_ty, mod)) return inst; const arena = sema.arena; @@ -26254,7 +26264,7 @@ fn coerceExtra( const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "'noreturn' declared here", .{}); + try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -26287,9 +26297,9 @@ fn coerceExtra( const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); if (inst_ty.isError(mod) and !dest_ty.isError(mod)) { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function cannot return an error", .{}); + try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{}); } else { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function return type declared here", .{}); + try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{}); } } @@ -27246,7 +27256,7 @@ fn coerceVarArgParam( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl), coerced_ty, .param_ty); + try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl, mod), coerced_ty, .param_ty); try sema.addDeclaredHereNote(msg, coerced_ty); break :msg msg; @@ -29186,13 +29196,14 @@ fn addReferencedBy( } fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); if (decl.analysis == .in_progress) { - const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(), "dependency loop detected", .{}); + const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{}); return sema.failWithOwnedErrorMsg(msg); } - sema.mod.ensureDeclAnalyzed(decl_index) catch |err| { + mod.ensureDeclAnalyzed(decl_index) catch |err| { if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; } else { @@ -31015,12 +31026,12 @@ fn resolvePeerTypes( // At this point, we hit a compile error. We need to recover // the source locations. 
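The `srcLoc()`/`toSrcLoc()` changes in this region follow from the same root cause: a `Decl`'s file scope is now reached through a namespace index, so resolving any lazy source location needs the Module. A minimal helper-shaped sketch of the pattern (hypothetical function name; calls as in the hunks above):

    fn noteDeclaredHere(sema: *Sema, msg: *Module.ErrorMsg, decl_index: Decl.Index) !void {
        const mod = sema.mod;
        const decl = mod.declPtr(decl_index);
        // was: decl.srcLoc() -- the Module now resolves the file scope.
        try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{});
    }
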
const chosen_src = candidate_srcs.resolve( - sema.gpa, + mod, mod.declPtr(block.src_decl), chosen_i, ); const candidate_src = candidate_srcs.resolve( - sema.gpa, + mod, mod.declPtr(block.src_decl), candidate_i + 1, ); @@ -31315,7 +31326,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); - const zir = struct_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -31353,7 +31364,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -31399,7 +31410,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = undefined, .instructions = .{}, .inlining = null, @@ -31522,7 +31533,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_single, .error_set_inferred, .error_set_merged, - .@"opaque", .enum_simple, => false, @@ -31678,6 +31688,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => false, // values, not types .simple_value => unreachable, @@ -31991,6 +32002,8 @@ fn resolveInferredErrorSet( return sema.fail(block, src, "unable to resolve inferred error set", .{}); } + const mod = sema.mod; + // In order to ensure that all dependencies are properly added to the set, we // need to ensure the function body is analyzed of the inferred error set. 
// However, in the case of comptime/inline function calls with inferred error sets, @@ -32011,7 +32024,7 @@ fn resolveInferredErrorSet( const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); errdefer msg.destroy(sema.gpa); - try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(), msg, "generic function declared here", .{}); + try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(mod), msg, "generic function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -32049,7 +32062,7 @@ fn resolveInferredErrorSetTy( fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; const decl_index = struct_obj.owner_decl; - const zir = struct_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -32123,7 +32136,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -32393,7 +32406,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const gpa = mod.gpa; const decl_index = union_obj.owner_decl; - const zir = union_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; assert(extended.opcode == .union_decl); const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); @@ -32463,7 +32476,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &union_obj.namespace, + .namespace = union_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -32665,7 +32678,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const prev_field_index = union_obj.fields.getIndex(field_name).?; const prev_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }).lazy; - try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl), msg, "other field here", .{}); + try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; }; @@ -32929,7 +32942,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { const opt_ty_decl = (try sema.namespaceLookup( &block, src, - builtin_ty.getNamespace().?, + builtin_ty.getNamespaceIndex(mod).unwrap().?, name, )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); return sema.analyzeDeclVal(&block, src, opt_ty_decl); @@ -32984,7 +32997,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .function, .array_sentinel, .error_set_inferred, - .@"opaque", .anyframe_T, .pointer, => return null, @@ -33123,7 +33135,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .inferred_alloc_mut => unreachable, }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => 
|int_type| { if (int_type.bits == 0) { return try mod.intValue(ty, 0); @@ -33131,7 +33143,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .ptr_type => return null, + .ptr_type => null, .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); @@ -33152,7 +33164,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .error_union_type => return null, + .error_union_type => null, .simple_type => |t| switch (t) { .f16, .f32, @@ -33190,18 +33202,19 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .export_options, .extern_options, .type_info, - => return null, + => null, - .void => return Value.void, - .noreturn => return Value.@"unreachable", - .null => return Value.null, - .undefined => return Value.undef, + .void => Value.void, + .noreturn => Value.@"unreachable", + .null => Value.null, + .undefined => Value.undef, .generic_poison => return error.GenericPoison, .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => null, // values, not types .simple_value => unreachable, @@ -33606,7 +33619,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_single, .error_set_inferred, .error_set_merged, - .@"opaque", .enum_simple, => false, @@ -33772,6 +33784,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => false, // values, not types .simple_value => unreachable, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 237a55984ed0..b484e21424ad 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -764,8 +764,9 @@ pub fn deinit(func: *CodeGen) void { /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { + const mod = func.bin_file.base.options.module.?; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(func.decl); + const src_loc = src.toSrcLoc(func.decl, mod); func.err_msg = try Module.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -6799,7 +6800,7 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { const mod = func.bin_file.base.options.module.?; - const enum_decl_index = enum_ty.getOwnerDecl(); + const enum_decl_index = enum_ty.getOwnerDecl(mod); var arena_allocator = std.heap.ArenaAllocator.init(func.gpa); defer arena_allocator.deinit(); diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index bfa5324dc6d8..45ad1d7eb377 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -254,7 +254,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { @setCold(true); std.debug.assert(emit.error_msg == null); const mod = emit.bin_file.base.options.module.?; - emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args); + emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(mod), format, args); return error.EmitFail; } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 1cfed06ff18a..4fb5267cb023 100644 --- a/src/arch/x86_64/CodeGen.zig +++ 
b/src/arch/x86_64/CodeGen.zig @@ -112,10 +112,10 @@ const Owner = union(enum) { mod_fn: *const Module.Fn, lazy_sym: link.File.LazySymbol, - fn getDecl(owner: Owner) Module.Decl.Index { + fn getDecl(owner: Owner, mod: *Module) Module.Decl.Index { return switch (owner) { .mod_fn => |mod_fn| mod_fn.owner_decl, - .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(), + .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(mod), }; } @@ -7926,6 +7926,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { } fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { + const mod = self.bin_file.options.module.?; switch (self.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { @@ -7944,7 +7945,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { // TODO: this might need adjusting like the linkers do. // Instead of flattening the owner and passing Decl.Index here we may // want to special case LazySymbol in DWARF linker too. - try dw.genArgDbgInfo(name, ty, self.owner.getDecl(), loc); + try dw.genArgDbgInfo(name, ty, self.owner.getDecl(mod), loc); }, .plan9 => {}, .none => {}, @@ -7958,6 +7959,7 @@ fn genVarDbgInfo( mcv: MCValue, name: [:0]const u8, ) !void { + const mod = self.bin_file.options.module.?; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -7988,7 +7990,7 @@ fn genVarDbgInfo( // TODO: this might need adjusting like the linkers do. // Instead of flattening the owner and passing Decl.Index here we may // want to special case LazySymbol in DWARF linker too. - try dw.genVarDbgInfo(name, ty, self.owner.getDecl(), is_ptr, loc); + try dw.genVarDbgInfo(name, ty, self.owner.getDecl(mod), is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -10936,7 +10938,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { try self.genLazySymbolRef( .call, .rax, - link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod), + link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(mod), mod), ); return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); @@ -11651,7 +11653,8 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV } fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { - return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl())) { + const mod = self.bin_file.options.module.?; + return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl(mod))) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 60f2d86a3d1d..36af222c7ed8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -524,8 +524,9 @@ pub const DeclGen = struct { fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); + const mod = dg.module; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(dg.decl.?); + const src_loc = src.toSrcLoc(dg.decl.?, mod); dg.error_msg = try Module.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } @@ -6484,6 +6485,7 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const inst_ty = f.typeOfIndex(inst); @@ -6495,7 +6497,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const local = try 
f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); try writer.print(" = {s}(", .{ - try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl() }, .{ .tag_name = enum_ty }), + try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl(mod) }, .{ .tag_name = enum_ty }), }); try f.writeCValue(writer, operand, .Other); try writer.writeAll(");\n"); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 082340085873..799f18e3e4c0 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1538,7 +1538,7 @@ pub const CType = extern union { .forward, .forward_parameter => { self.storage = .{ .fwd = .{ .base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union }, - .data = ty.getOwnerDecl(), + .data = ty.getOwnerDecl(mod), } }; self.value = .{ .cty = initPayload(&self.storage.fwd) }; }, @@ -1985,7 +1985,7 @@ pub const CType = extern union { const unnamed_pl = try arena.create(Payload.Unnamed); unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{ .fields = fields_pl, - .owner_decl = ty.getOwnerDecl(), + .owner_decl = ty.getOwnerDecl(mod), .id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable, } }; return initPayload(unnamed_pl); @@ -2124,7 +2124,7 @@ pub const CType = extern union { .forward, .forward_parameter, .complete, .parameter, .global => unreachable, .payload => if (ty.unionTagTypeSafety()) |_| { const data = cty.cast(Payload.Unnamed).?.data; - return ty.getOwnerDecl() == data.owner_decl and data.id == 0; + return ty.getOwnerDecl(mod) == data.owner_decl and data.id == 0; } else unreachable, }, @@ -2242,7 +2242,7 @@ pub const CType = extern union { => switch (self.kind) { .forward, .forward_parameter, .complete, .parameter, .global => unreachable, .payload => if (ty.unionTagTypeSafety()) |_| { - autoHash(hasher, ty.getOwnerDecl()); + autoHash(hasher, ty.getOwnerDecl(mod)); autoHash(hasher, @as(u32, 0)); } else unreachable, }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5f013c38ec18..10cf66a69abc 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1177,7 +1177,7 @@ pub const Object = struct { var di_scope: ?*llvm.DIScope = null; if (dg.object.di_builder) |dib| { - di_file = try dg.object.getDIFile(gpa, decl.src_namespace.file_scope); + di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; const is_internal_linkage = decl.val.tag() != .extern_fn and @@ -1505,7 +1505,7 @@ pub const Object = struct { return di_type; }, .Enum => { - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const owner_decl = o.module.declPtr(owner_decl_index); if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -1558,7 +1558,7 @@ pub const Object = struct { @panic("TODO implement bigint debug enumerators to llvm int for 32-bit compiler builds"); } - const di_file = try o.getDIFile(gpa, owner_decl.src_namespace.file_scope); + const di_file = try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope); const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace); const name = try ty.nameAlloc(gpa, o.module); @@ -1737,13 +1737,13 @@ pub const Object = struct { } const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const owner_decl = o.module.declPtr(owner_decl_index); const opaque_di_ty = dib.createForwardDeclType( DW.TAG.structure_type, name, try o.namespaceToDebugScope(owner_decl.src_namespace), - try 
o.getDIFile(gpa, owner_decl.src_namespace.file_scope), + try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope), owner_decl.src_node + 1, ); // The recursive call to `lowerDebugType` via `namespaceToDebugScope` @@ -2085,7 +2085,7 @@ pub const Object = struct { // into. Therefore we can satisfy this by making an empty namespace, // rather than changing the frontend to unnecessarily resolve the // struct field types. - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` @@ -2096,7 +2096,7 @@ } if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` @@ -2162,7 +2162,7 @@ }, .Union => { const compile_unit_scope = o.di_compile_unit.?.toScope(); - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); @@ -2395,8 +2395,10 @@ } } - fn namespaceToDebugScope(o: *Object, namespace: *const Module.Namespace) !*llvm.DIScope { - if (namespace.parent == null) { + fn namespaceToDebugScope(o: *Object, namespace_index: Module.Namespace.Index) !*llvm.DIScope { + const mod = o.module; + const namespace = mod.namespacePtr(namespace_index); + if (namespace.parent == .none) { const di_file = try o.getDIFile(o.gpa, namespace.file_scope); return di_file.toScope(); } @@ -2408,12 +2410,13 @@ /// Assertion `!isa(Scope) && "shouldn't make a namespace scope for a type"' /// when targeting CodeView (Windows). fn makeEmptyNamespaceDIType(o: *Object, decl_index: Module.Decl.Index) !*llvm.DIType { - const decl = o.module.declPtr(decl_index); + const mod = o.module; + const decl = mod.declPtr(decl_index); const fields: [0]*llvm.DIType = .{}; return o.di_builder.?.createStructType( try o.namespaceToDebugScope(decl.src_namespace), decl.name, // TODO use fully qualified name - try o.getDIFile(o.gpa, decl.src_namespace.file_scope), + try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope), decl.src_line + 1, 0, // size in bits 0, // align in bits @@ -2434,14 +2437,14 @@ const std_file = (mod.importPkg(std_pkg) catch unreachable).file; const builtin_str: []const u8 = "builtin"; - const std_namespace = mod.declPtr(std_file.root_decl.unwrap().?).src_namespace; + const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace); const builtin_decl = std_namespace.decls .getKeyAdapted(builtin_str, Module.DeclAdapter{ .mod = mod }).?; const stack_trace_str: []const u8 = "StackTrace"; // buffer is only used for int_type, `builtin` is a struct.
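`namespaceToDebugScope` above shows the nullable-pointer parent becoming an optional index: `null` checks turn into `.none` comparisons, and pointers are recovered through `mod.namespacePtr`. A sketch of the idiom, inferred from `.parent == .none` and `toOptional()` in this patch (assumed API, hypothetical function name):

    fn walkToParent(mod: *Module, namespace_index: Module.Namespace.Index) void {
        const namespace = mod.namespacePtr(namespace_index);
        if (namespace.parent.unwrap()) |parent_index| {
            const parent = mod.namespacePtr(parent_index);
            _ = parent;
        } else {
            // `.none` replaces the old `null`: a root namespace, whose
            // scope comes directly from `namespace.file_scope`.
        }
    }
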
const builtin_ty = mod.declPtr(builtin_decl).val.toType(); - const builtin_namespace = builtin_ty.getNamespace().?; + const builtin_namespace = builtin_ty.getNamespace(mod).?; const stack_trace_decl_index = builtin_namespace.decls .getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .mod = mod }).?; const stack_trace_decl = mod.declPtr(stack_trace_decl_index); @@ -2464,7 +2467,8 @@ pub const DeclGen = struct { fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); assert(self.err_msg == null); - const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl); + const mod = self.module; + const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl, mod); self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } @@ -2536,7 +2540,7 @@ pub const DeclGen = struct { } if (dg.object.di_builder) |dib| { - const di_file = try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope); + const di_file = try dg.object.getDIFile(dg.gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; const is_internal_linkage = !dg.module.decl_exports.contains(decl_index); @@ -2837,15 +2841,11 @@ pub const DeclGen = struct { .Opaque => { if (t.ip_index == .anyopaque_type) return dg.context.intType(8); - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - - const opaque_obj = t.castTag(.@"opaque").?.data; - const name = try opaque_obj.getFullyQualifiedName(dg.module); + const opaque_type = mod.intern_pool.indexToKey(t.ip_index).opaque_type; + const name = try mod.opaqueFullyQualifiedName(opaque_type); defer gpa.free(name); const llvm_struct_ty = dg.context.structCreateNamed(name); @@ -2931,7 +2931,7 @@ pub const DeclGen = struct { }, .ErrorSet => return dg.context.intType(16), .Struct => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); if (gop.found_existing) return gop.value_ptr.*; // The Type memory is ephemeral; since we want to store a longer-lived @@ -2999,7 +2999,7 @@ pub const DeclGen = struct { return int_llvm_ty; } - const name = try struct_obj.getFullyQualifiedName(dg.module); + const name = try struct_obj.getFullyQualifiedName(mod); defer gpa.free(name); const llvm_struct_ty = dg.context.structCreateNamed(name); @@ -3057,7 +3057,7 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Union => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); if (gop.found_existing) return gop.value_ptr.*; // The Type memory is ephemeral; since we want to store a longer-lived @@ -3080,7 +3080,7 @@ pub const DeclGen = struct { return enum_tag_llvm_ty; } - const name = try union_obj.getFullyQualifiedName(dg.module); + const name = try union_obj.getFullyQualifiedName(mod); defer gpa.free(name); const llvm_union_ty = dg.context.structCreateNamed(name); @@ -6131,7 +6131,7 @@ pub const FuncGen = struct { const func = self.air.values[ty_pl.payload].castTag(.function).?.data; const decl_index = func.owner_decl; const decl = 
mod.declPtr(decl_index); - const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope); + const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; const line_number = decl.src_line + 1; const cur_debug_location = self.builder.getCurrentDebugLocation2(); @@ -6193,7 +6193,7 @@ pub const FuncGen = struct { const func = self.air.values[ty_pl.payload].castTag(.function).?.data; const mod = self.dg.module; const decl = mod.declPtr(func.owner_decl); - const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope); + const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; const old = self.dbg_inlined.pop(); self.di_scope = old.scope; @@ -8853,7 +8853,8 @@ pub const FuncGen = struct { } fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { - const enum_decl = enum_ty.getOwnerDecl(); + const mod = self.dg.module; + const enum_decl = enum_ty.getOwnerDecl(mod); // TODO: detect when the type changes and re-emit this function. const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_decl); @@ -8864,7 +8865,6 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const mod = self.dg.module; const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod); defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn}); @@ -8931,7 +8931,8 @@ pub const FuncGen = struct { } fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { - const enum_decl = enum_ty.getOwnerDecl(); + const mod = self.dg.module; + const enum_decl = enum_ty.getOwnerDecl(mod); // TODO: detect when the type changes and re-emit this function. 
const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_decl); @@ -8942,7 +8943,6 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const mod = self.dg.module; const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod); defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 843b67e4262a..52f94cc6d555 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -218,8 +218,9 @@ pub const DeclGen = struct { pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); + const mod = self.module; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index)); + const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index), mod); assert(self.error_msg == null); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args); return error.CodegenFail; @@ -2775,7 +2776,10 @@ pub const DeclGen = struct { fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; - const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index)); + const src_fname_id = try self.spv.resolveSourceFileName( + self.module, + self.module.declPtr(self.decl_index), + ); try self.func.body.emit(self.spv.gpa, .OpLine, .{ .file = src_fname_id, .line = dbg_stmt.line, @@ -3192,6 +3196,7 @@ pub const DeclGen = struct { } fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); @@ -3274,7 +3279,7 @@ pub const DeclGen = struct { assert(as.errors.items.len != 0); assert(self.error_msg == null); const loc = LazySrcLoc.nodeOffset(0); - const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index)); + const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index), mod); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len); diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index c5ba429ec9a8..d53dcb4368ca 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -390,8 +390,8 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void { /// Fetch the result-id of an OpString instruction that encodes the path of the source /// file of the decl. This function may also emit an OpSource with source-level information regarding /// the decl. 
-pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef { - const path = decl.getFileScope().sub_file_path; +pub fn resolveSourceFileName(self: *Module, zig_module: *ZigModule, zig_decl: *ZigDecl) !IdRef { + const path = zig_decl.getFileScope(zig_module).sub_file_path; const result = try self.source_file_names.getOrPut(self.gpa, path); if (!result.found_existing) { const file_result_id = self.allocId(); diff --git a/src/crash_report.zig b/src/crash_report.zig index b2e3018de66c..57b870c198c0 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -99,7 +99,7 @@ fn dumpStatusReport() !void { allocator, anal.body, anal.body_index, - block.namespace.file_scope, + mod.namespacePtr(block.namespace).file_scope, block_src_decl.src_node, 6, // indent stderr, @@ -108,7 +108,7 @@ fn dumpStatusReport() !void { else => |e| return e, }; try stderr.writeAll(" For full context, use the command\n zig ast-check -t "); - try writeFilePath(block.namespace.file_scope, stderr); + try writeFilePath(mod.namespacePtr(block.namespace).file_scope, stderr); try stderr.writeAll("\n\n"); var parent = anal.parent; @@ -121,7 +121,7 @@ fn dumpStatusReport() !void { print_zir.renderSingleInstruction( allocator, curr.body[curr.body_index], - curr.block.namespace.file_scope, + mod.namespacePtr(curr.block.namespace).file_scope, curr_block_src_decl.src_node, 6, // indent stderr, @@ -148,7 +148,7 @@ fn writeFilePath(file: *Module.File, stream: anytype) !void { } fn writeFullyQualifiedDeclWithFile(mod: *Module, decl: *Decl, stream: anytype) !void { - try writeFilePath(decl.getFileScope(), stream); + try writeFilePath(decl.getFileScope(mod), stream); try stream.writeAll(": "); try decl.renderFullyQualifiedDebugName(mod, stream); } diff --git a/src/link.zig b/src/link.zig index 471b26ae9fca..ac764f06f842 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1129,8 +1129,8 @@ pub const File = struct { Type.anyerror }; } - pub fn getDecl(self: LazySymbol) Module.Decl.OptionalIndex { - return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull()); + pub fn getDecl(self: LazySymbol, mod: *Module) Module.Decl.OptionalIndex { + return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull(mod)); } }; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 6117f1c1de13..4e75cfff97a0 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1032,20 +1032,20 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void { self.getAtomPtr(atom_index).sym_index = 0; } -pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| { - return llvm_object.updateFunc(module, func, air, liveness); + return llvm_object.updateFunc(mod, func, air, liveness); } } const tracy = trace(@src()); defer tracy.end(); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); self.freeUnnamedConsts(decl_index); @@ -1056,7 +1056,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live const res = try codegen.generateFunction( &self.base, - decl.srcLoc(), + decl.srcLoc(mod), func, air, liveness, @@ -1067,7 +1067,7 @@ pub 
fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -1076,7 +1076,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. - return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { @@ -1110,7 +1110,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1); } - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, }); var code = switch (res) { @@ -1141,19 +1141,19 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In pub fn updateDecl( self: *Coff, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, ) link.File.UpdateDeclError!void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } const tracy = trace(@src()); defer tracy.end(); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? @@ -1173,7 +1173,7 @@ pub fn updateDecl( defer code_buffer.deinit(); const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -1183,7 +1183,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -1192,7 +1192,7 @@ pub fn updateDecl( // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. 
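[Editorial sketch] The same `module` → `mod` rename and `srcLoc(mod)` threading recurs in the ELF, MachO, Plan9, and Wasm backends below. Condensed into one sketch (not literal code from any single backend, with the object-file write elided and `SomeLinker` a stand-in name), the shared updateFunc flow these hunks keep intact looks like:

    pub fn updateFunc(self: *SomeLinker, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
        const decl_index = func.owner_decl;
        const decl = mod.declPtr(decl_index);

        var code_buffer = std.ArrayList(u8).init(self.base.allocator);
        defer code_buffer.deinit();

        // srcLoc now takes the Module so it can resolve the decl's namespace index.
        const res = try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none);
        const code = switch (res) {
            .ok => code_buffer.items,
            .fail => |em| {
                decl.analysis = .codegen_failure;
                try mod.failed_decls.put(mod.gpa, decl_index, em);
                return;
            },
        };
        _ = code; // ... write `code` into the output file at the decl's atom ...

        // The vaddr and size may have changed, so refresh the export symbols too.
        return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
    }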
- return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } fn updateLazySymbolAtom( @@ -1217,8 +1217,8 @@ fn updateLazySymbolAtom( const atom = self.getAtomPtr(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl| - mod.declPtr(owner_decl).srcLoc() + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) else Module.SrcLoc{ .file_scope = undefined, @@ -1262,7 +1262,8 @@ fn updateLazySymbolAtom( } pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index { - const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); + const mod = self.base.options.module.?; + const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { @@ -1277,7 +1278,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Ato metadata.state.* = .pending_flush; const atom = metadata.atom.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.rdata_section_index.?, }); @@ -1411,7 +1412,7 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void { pub fn updateDeclExports( self: *Coff, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) link.File.UpdateDeclExportsError!void { @@ -1423,7 +1424,7 @@ pub fn updateDeclExports( // Even in the case of LLVM, we need to notice certain exported symbols in order to // detect the default subsystem. 
for (exports) |exp| { - const exported_decl = module.declPtr(exp.exported_decl); + const exported_decl = mod.declPtr(exp.exported_decl); if (exported_decl.getFunction() == null) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, @@ -1433,23 +1434,23 @@ pub fn updateDeclExports( if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and self.base.options.link_libc) { - module.stage1_flags.have_c_main = true; + mod.stage1_flags.have_c_main = true; } else if (decl_cc == winapi_cc and self.base.options.target.os.tag == .windows) { if (mem.eql(u8, exp.options.name, "WinMain")) { - module.stage1_flags.have_winmain = true; + mod.stage1_flags.have_winmain = true; } else if (mem.eql(u8, exp.options.name, "wWinMain")) { - module.stage1_flags.have_wwinmain = true; + mod.stage1_flags.have_wwinmain = true; } else if (mem.eql(u8, exp.options.name, "WinMainCRTStartup")) { - module.stage1_flags.have_winmain_crt_startup = true; + mod.stage1_flags.have_winmain_crt_startup = true; } else if (mem.eql(u8, exp.options.name, "wWinMainCRTStartup")) { - module.stage1_flags.have_wwinmain_crt_startup = true; + mod.stage1_flags.have_wwinmain_crt_startup = true; } else if (mem.eql(u8, exp.options.name, "DllMainCRTStartup")) { - module.stage1_flags.have_dllmain_crt_startup = true; + mod.stage1_flags.have_dllmain_crt_startup = true; } } } - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports); } const tracy = trace(@src()); @@ -1457,7 +1458,7 @@ pub fn updateDeclExports( const gpa = self.base.allocator; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const atom = self.getAtom(atom_index); const decl_sym = atom.getSymbol(self); @@ -1468,12 +1469,12 @@ pub fn updateDeclExports( if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text")) { - try module.failed_exports.putNoClobber( - module.gpa, + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: ExportOptions.section", .{}, ), @@ -1483,12 +1484,12 @@ pub fn updateDeclExports( } if (exp.options.linkage == .LinkOnce) { - try module.failed_exports.putNoClobber( - module.gpa, + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}, ), diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index c971b5b26fdd..0561ccbfdac2 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2597,7 +2597,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 { const decl = mod.declPtr(decl_index); - const file_scope = decl.getFileScope(); + const file_scope = decl.getFileScope(mod); const gop = try self.di_files.getOrPut(self.allocator, file_scope); if (!gop.found_existing) { switch (self.bin_file.tag) { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 7bd36a9b6091..c80d60d72a24 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2414,7 +2414,8 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void { } pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Index { - const gop = try 
self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); + const mod = self.base.options.module.?; + const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { @@ -2429,7 +2430,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Inde metadata.state.* = .pending_flush; const atom = metadata.atom.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.rodata_section_index.?, }); @@ -2573,19 +2574,19 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s return local_sym; } -pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); } const tracy = trace(@src()); defer tracy.end(); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); self.freeUnnamedConsts(decl_index); @@ -2594,28 +2595,28 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC); if (decl_state) |*ds| { try self.dwarf.?.commitDeclState( - module, + mod, decl_index, local_sym.st_value, local_sym.st_size, @@ -2625,25 +2626,25 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. 
- return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn updateDecl( self: *Elf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, ) File.UpdateDeclError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } const tracy = trace(@src()); defer tracy.end(); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? @@ -2662,13 +2663,13 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2677,7 +2678,7 @@ pub fn updateDecl( .parent_atom_index = atom.getSymbolIndex().?, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2688,7 +2689,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -2696,7 +2697,7 @@ pub fn updateDecl( const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT); if (decl_state) |*ds| { try self.dwarf.?.commitDeclState( - module, + mod, decl_index, local_sym.st_value, local_sym.st_size, @@ -2706,7 +2707,7 @@ pub fn updateDecl( // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. 
- return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } fn updateLazySymbolAtom( @@ -2735,8 +2736,8 @@ fn updateLazySymbolAtom( const atom = self.getAtom(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl| - mod.declPtr(owner_decl).srcLoc() + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) else Module.SrcLoc{ .file_scope = undefined, @@ -2812,7 +2813,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module const atom_index = try self.createAtom(); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), typed_value, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, @@ -2853,7 +2854,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module pub fn updateDeclExports( self: *Elf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) File.UpdateDeclExportsError!void { @@ -2861,7 +2862,7 @@ pub fn updateDeclExports( @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports); } const tracy = trace(@src()); @@ -2869,7 +2870,7 @@ pub fn updateDeclExports( const gpa = self.base.allocator; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const atom = self.getAtom(atom_index); const decl_sym = atom.getSymbol(self); @@ -2881,10 +2882,10 @@ pub fn updateDeclExports( for (exports) |exp| { if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text")) { - try module.failed_exports.ensureUnusedCapacity(module.gpa, 1); - module.failed_exports.putAssumeCapacityNoClobber( + try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); + mod.failed_exports.putAssumeCapacityNoClobber( exp, - try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(mod), "Unimplemented: ExportOptions.section", .{}), ); continue; } @@ -2900,10 +2901,10 @@ pub fn updateDeclExports( }, .Weak => elf.STB_WEAK, .LinkOnce => { - try module.failed_exports.ensureUnusedCapacity(module.gpa, 1); - module.failed_exports.putAssumeCapacityNoClobber( + try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); + mod.failed_exports.putAssumeCapacityNoClobber( exp, - try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}), ); continue; }, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 306661c5c58e..06f79cf3fb56 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1847,18 +1847,18 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void { self.markRelocsDirtyByTarget(target); } -pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) 
!void { +pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); } const tracy = trace(@src()); defer tracy.end(); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); self.freeUnnamedConsts(decl_index); @@ -1868,23 +1868,23 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv defer code_buffer.deinit(); var decl_state = if (self.d_sym) |*d_sym| - try d_sym.dwarf.initDeclState(module, decl_index) + try d_sym.dwarf.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); var code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -1893,7 +1893,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv if (decl_state) |*ds| { try self.d_sym.?.dwarf.commitDeclState( - module, + mod, decl_index, addr, self.getAtom(atom_index).size, @@ -1903,7 +1903,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv // Since we updated the vaddr and the size, each corresponding export symbol also // needs to be updated. 
- try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + try self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 { @@ -1912,15 +1912,15 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - const module = self.base.options.module.?; + const mod = self.base.options.module.?; const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } const unnamed_consts = gop.value_ptr; - const decl = module.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(module); + const decl = mod.declPtr(decl_index); + const decl_name = try decl.getFullyQualifiedName(mod); defer gpa.free(decl_name); const name_str_index = blk: { @@ -1935,20 +1935,19 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu const atom_index = try self.createAtom(); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), typed_value, &code_buffer, .none, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, }); var code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, }; - const mod = self.base.options.module.?; const required_alignment = typed_value.ty.abiAlignment(mod); const atom = self.getAtomPtr(atom_index); atom.size = code.len; @@ -1972,17 +1971,17 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu return atom.getSymbolIndex().?; } -pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { +pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } const tracy = trace(@src()); defer tracy.end(); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? 
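[Editorial sketch] This pervasive `decl.srcLoc(mod)` change, like the matching `toSrcLoc(decl, mod)` in the DeclGen error paths earlier, exists for one reason: a Decl now stores a `Namespace.Index` rather than a namespace pointer, so computing a source location must consult the module's namespace table. A hedged sketch of what the updated accessor plausibly looks like; the `SrcLoc` field names are assumptions based on the `Module.SrcLoc{ .file_scope = ... }` literals visible in the lazy-symbol hunks:

    pub fn srcLoc(decl: *const Decl, mod: *Module) Module.SrcLoc {
        return .{
            // The namespace index must be resolved through the Module.
            .file_scope = mod.namespacePtr(decl.src_namespace).file_scope,
            .parent_decl_node = decl.src_node,
            .lazy = LazySrcLoc.nodeOffset(0),
        };
    }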
@@ -1998,7 +1997,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) payload.data.is_threadlocal and !self.base.options.single_threaded else false; - if (is_threadlocal) return self.updateThreadlocalVariable(module, decl_index); + if (is_threadlocal) return self.updateThreadlocalVariable(mod, decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const sym_index = self.getAtom(atom_index).getSymbolIndex().?; @@ -2008,14 +2007,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) defer code_buffer.deinit(); var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym| - try d_sym.dwarf.initDeclState(module, decl_index) + try d_sym.dwarf.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2024,7 +2023,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) .parent_atom_index = sym_index, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2035,7 +2034,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -2043,7 +2042,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) if (decl_state) |*ds| { try self.d_sym.?.dwarf.commitDeclState( - module, + mod, decl_index, addr, self.getAtom(atom_index).size, @@ -2053,7 +2052,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) // Since we updated the vaddr and the size, each corresponding export symbol also // needs to be updated. 
- try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + try self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } fn updateLazySymbolAtom( @@ -2082,8 +2081,8 @@ fn updateLazySymbolAtom( const atom = self.getAtomPtr(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl| - mod.declPtr(owner_decl).srcLoc() + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) else Module.SrcLoc{ .file_scope = undefined, @@ -2127,7 +2126,8 @@ fn updateLazySymbolAtom( } pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.Index { - const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); + const mod = self.base.options.module.?; + const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { @@ -2145,7 +2145,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In metadata.state.* = .pending_flush; const atom = metadata.atom.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.data_const_section_index.?, }); @@ -2179,7 +2179,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const decl_metadata = self.decls.get(decl_index).?; const decl_val = decl.val.castTag(.variable).?.data.init; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2188,7 +2188,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D .parent_atom_index = init_sym_index, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2379,7 +2379,7 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: Module.De pub fn updateDeclExports( self: *MachO, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) File.UpdateDeclExportsError!void { @@ -2388,7 +2388,7 @@ pub fn updateDeclExports( } if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| - return llvm_object.updateDeclExports(module, decl_index, exports); + return llvm_object.updateDeclExports(mod, decl_index, exports); } const tracy = trace(@src()); @@ -2396,7 +2396,7 @@ pub fn updateDeclExports( const gpa = self.base.allocator; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const atom = self.getAtom(atom_index); const decl_sym = atom.getSymbol(self); @@ -2410,12 +2410,12 @@ pub fn updateDeclExports( if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, "__text")) { - try module.failed_exports.putNoClobber( - module.gpa, + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), 
"Unimplemented: ExportOptions.section", .{}, ), @@ -2425,12 +2425,12 @@ pub fn updateDeclExports( } if (exp.options.linkage == .LinkOnce) { - try module.failed_exports.putNoClobber( - module.gpa, + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}, ), @@ -2474,9 +2474,9 @@ pub fn updateDeclExports( // TODO: this needs rethinking const global = self.getGlobal(exp_name).?; if (sym_loc.sym_index != global.sym_index and global.file != null) { - _ = try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create( + _ = try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), \\LinkError: symbol '{s}' defined multiple times , .{exp_name}, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 7a389a789d9b..968cbb0e7ec4 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -213,14 +213,14 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void { const gpa = self.base.allocator; const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope()); + const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope(mod)); if (fn_map_res.found_existing) { if (try fn_map_res.value_ptr.functions.fetchPut(gpa, decl_index, out)) |old_entry| { gpa.free(old_entry.value.code); gpa.free(old_entry.value.lineinfo); } } else { - const file = decl.getFileScope(); + const file = decl.getFileScope(mod); const arena = self.path_arena.allocator(); // each file gets a symbol fn_map_res.value_ptr.* = .{ @@ -276,13 +276,13 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); self.freeUnnamedConsts(decl_index); _ = try self.seeDecl(decl_index); @@ -298,7 +298,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv const res = try codegen.generateFunction( &self.base, - decl.srcLoc(), + decl.srcLoc(mod), func, air, liveness, @@ -316,7 +316,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv .ok => try code_buffer.toOwnedSlice(), .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -366,7 +366,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I }; self.syms.items[info.sym_index.?] 
= sym; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = @enumToInt(decl_index), @@ -388,8 +388,8 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I return @intCast(u32, info.got_index.?); } -pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) !void { - const decl = module.declPtr(decl_index); +pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { + const decl = mod.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? @@ -409,7 +409,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) defer code_buffer.deinit(); const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ .none = {} }, .{ @@ -419,7 +419,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -707,7 +707,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const code = blk: { const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; if (is_fn) { - const table = self.fn_decl_table.get(source_decl.getFileScope()).?.functions; + const table = self.fn_decl_table.get(source_decl.getFileScope(mod)).?.functions; const output = table.get(source_decl_index).?; break :blk output.code; } else { @@ -729,7 +729,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No } fn addDeclExports( self: *Plan9, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { @@ -740,9 +740,9 @@ fn addDeclExports( // plan9 does not support custom sections if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { - try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( self.base.allocator, - module.declPtr(decl_index).srcLoc(), + mod.declPtr(decl_index).srcLoc(mod), "plan9 does not support extra sections", .{}, )); @@ -773,7 +773,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { const decl = mod.declPtr(decl_index); const is_fn = (decl.val.tag() == .function); if (is_fn) { - var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope()).?; + var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; if (submap.fetchSwapRemove(decl_index)) |removed_entry| { self.base.allocator.free(removed_entry.value.code); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index fb7ca3a87fc3..ddf5130fd291 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1348,7 +1348,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes defer code_writer.deinit(); // const result = try 
codegen.generateFunction( // &wasm.base, - // decl.srcLoc(), + // decl.srcLoc(mod), // func, // air, // liveness, @@ -1357,7 +1357,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes // ); const result = try codegen.generateFunction( &wasm.base, - decl.srcLoc(), + decl.srcLoc(mod), func, air, liveness, @@ -1425,7 +1425,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi const res = try codegen.generateSymbol( &wasm.base, - decl.srcLoc(), + decl.srcLoc(mod), .{ .ty = decl.ty, .val = val }, &code_writer, .none, @@ -1554,7 +1554,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const result = try codegen.generateSymbol( &wasm.base, - decl.srcLoc(), + decl.srcLoc(mod), tv, &value_bytes, .none, @@ -1693,7 +1693,7 @@ pub fn updateDeclExports( if (exp.options.section) |section| { try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", .{section}, )); @@ -1712,7 +1712,7 @@ pub fn updateDeclExports( if (!exp_is_weak and !existing_sym.isWeak()) { try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), \\LinkError: symbol '{s}' defined multiple times \\ first definition in '{s}' \\ next definition in '{s}' @@ -1745,7 +1745,7 @@ pub fn updateDeclExports( .LinkOnce => { try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: LinkOnce", .{}, )); diff --git a/src/type.zig b/src/type.zig index e7dad914221a..2870b5616f70 100644 --- a/src/type.zig +++ b/src/type.zig @@ -42,8 +42,6 @@ pub const Type = struct { .error_set_merged, => return .ErrorSet, - .@"opaque" => return .Opaque, - .function => return .Fn, .array, @@ -87,6 +85,7 @@ pub const Type = struct { .error_union_type => return .ErrorUnion, .struct_type => return .Struct, .union_type => return .Union, + .opaque_type => return .Opaque, .simple_type => |s| switch (s) { .f16, .f32, @@ -361,12 +360,6 @@ pub const Type = struct { return true; }, - .@"opaque" => { - const opaque_obj_a = a.castTag(.@"opaque").?.data; - const opaque_obj_b = (b.castTag(.@"opaque") orelse return false).data; - return opaque_obj_a == opaque_obj_b; - }, - .function => { if (b.zigTypeTag(mod) != .Fn) return false; @@ -649,12 +642,6 @@ pub const Type = struct { std.hash.autoHash(hasher, ies); }, - .@"opaque" => { - std.hash.autoHash(hasher, std.builtin.TypeId.Opaque); - const opaque_obj = ty.castTag(.@"opaque").?.data; - std.hash.autoHash(hasher, opaque_obj); - }, - .function => { std.hash.autoHash(hasher, std.builtin.TypeId.Fn); @@ -974,7 +961,6 @@ pub const Type = struct { .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), .enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull), - .@"opaque" => return self.copyPayloadShallow(allocator, Payload.Opaque), } } @@ -1079,12 +1065,6 @@ pub const Type = struct { @tagName(t), enum_numbered.owner_decl, }); }, - .@"opaque" => { - const opaque_obj = ty.castTag(.@"opaque").?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), opaque_obj.owner_decl, - }); - }, .function => { const payload = ty.castTag(.function).?.data; @@ -1303,11 +1283,6 @@ pub const Type = struct { const decl = mod.declPtr(enum_numbered.owner_decl); try 
decl.renderFullyQualifiedName(mod, writer); }, - .@"opaque" => { - const opaque_obj = ty.cast(Payload.Opaque).?.data; - const decl = mod.declPtr(opaque_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, .error_set_inferred => { const func = ty.castTag(.error_set_inferred).?.data.func; @@ -1575,6 +1550,10 @@ pub const Type = struct { .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => |opaque_type| { + const decl = mod.declPtr(opaque_type.decl); + try decl.renderFullyQualifiedName(mod, writer); + }, // values, not types .simple_value => unreachable, @@ -1622,7 +1601,6 @@ pub const Type = struct { .none => switch (ty.tag()) { .error_set_inferred, - .@"opaque", .error_set_single, .error_union, .error_set, @@ -1759,8 +1737,8 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.bits != 0, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| int_type.bits != 0, .ptr_type => |ptr_type| { // Pointers to zero-bit types still have a runtime address; however, pointers // to comptime-only types do not, with the exception of function pointers. @@ -1797,7 +1775,7 @@ pub const Type = struct { } }, .error_union_type => @panic("TODO"), - .simple_type => |t| return switch (t) { + .simple_type => |t| switch (t) { .f16, .f32, .f64, @@ -1848,6 +1826,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => true, // values, not types .simple_value => unreachable, @@ -1876,7 +1855,6 @@ pub const Type = struct { .error_set_single, .error_set_inferred, .error_set_merged, - .@"opaque", // These are function bodies, not function pointers. 
.function, .enum_simple, @@ -1960,6 +1938,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => false, // values, not types .simple_value => unreachable, @@ -2144,8 +2123,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, .none => switch (ty.tag()) { - .@"opaque" => return AbiAlignmentAdvanced{ .scalar = 1 }, - // represents machine code; not a pointer .function => { const alignment = ty.castTag(.function).?.data.alignment; @@ -2362,6 +2339,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, // values, not types .simple_value => unreachable, @@ -2536,7 +2514,6 @@ pub const Type = struct { .none => switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer - .@"opaque" => unreachable, // no size available .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, @@ -2777,6 +2754,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => unreachable, // no size available // values, not types .simple_value => unreachable, @@ -2948,6 +2926,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => unreachable, // values, not types .simple_value => unreachable, @@ -2965,7 +2944,6 @@ pub const Type = struct { .empty_struct => unreachable, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .@"opaque" => unreachable, .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -3806,6 +3784,7 @@ pub const Type = struct { .simple_type => unreachable, // handled via Index enum tag above .struct_type => @panic("TODO"), .union_type => unreachable, + .opaque_type => unreachable, // values, not types .simple_value => unreachable, @@ -4004,7 +3983,6 @@ pub const Type = struct { .function, .array_sentinel, .error_set_inferred, - .@"opaque", .anyframe_T, .pointer, => return null, @@ -4182,6 +4160,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => return null, // values, not types .simple_value => unreachable, @@ -4208,7 +4187,6 @@ pub const Type = struct { .error_set_single, .error_set_inferred, .error_set_merged, - .@"opaque", .enum_simple, => false, @@ -4350,6 +4328,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => false, // values, not types .simple_value => unreachable, @@ -4399,21 +4378,31 @@ pub const Type = struct { } /// Returns null if the type has no namespace. 
- pub fn getNamespace(self: Type) ?*Module.Namespace { - return switch (self.tag()) { - .@"struct" => &self.castTag(.@"struct").?.data.namespace, - .enum_full => &self.castTag(.enum_full).?.data.namespace, - .enum_nonexhaustive => &self.castTag(.enum_nonexhaustive).?.data.namespace, - .empty_struct => self.castTag(.empty_struct).?.data, - .@"opaque" => &self.castTag(.@"opaque").?.data.namespace, - .@"union" => &self.castTag(.@"union").?.data.namespace, - .union_safety_tagged => &self.castTag(.union_safety_tagged).?.data.namespace, - .union_tagged => &self.castTag(.union_tagged).?.data.namespace, + pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .@"struct" => ty.castTag(.@"struct").?.data.namespace.toOptional(), + .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(), + .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(), + .empty_struct => @panic("TODO"), + .@"union" => ty.castTag(.@"union").?.data.namespace.toOptional(), + .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.namespace.toOptional(), + .union_tagged => ty.castTag(.union_tagged).?.data.namespace.toOptional(), - else => null, + else => .none, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + else => .none, + }, }; } + /// Returns null if the type has no namespace. + pub fn getNamespace(ty: Type, mod: *Module) ?*Module.Namespace { + return if (getNamespaceIndex(ty, mod).unwrap()) |i| mod.namespacePtr(i) else null; + } + // Works for vectors and vectors of integers. pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value { const scalar = try minIntScalar(ty.scalarType(mod), mod); @@ -4911,78 +4900,81 @@ pub const Type = struct { } pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - else => return null, - }; - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.srcLoc(mod); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - return enum_numbered.srcLoc(mod); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.srcLoc(mod); - }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.srcLoc(mod); - }, - .error_set => { - const error_set = ty.castTag(.error_set).?.data; - return error_set.srcLoc(mod); - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.srcLoc(mod); + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .enum_full, .enum_nonexhaustive => { + const enum_full = ty.cast(Payload.EnumFull).?.data; + return enum_full.srcLoc(mod); + }, + .enum_numbered => { + const enum_numbered = ty.castTag(.enum_numbered).?.data; + return enum_numbered.srcLoc(mod); + }, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + return enum_simple.srcLoc(mod); + }, + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + return struct_obj.srcLoc(mod); + }, + .error_set => { + const error_set = ty.castTag(.error_set).?.data; + return error_set.srcLoc(mod); + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj 
= ty.cast(Payload.Union).?.data; + return union_obj.srcLoc(mod); + }, + + else => return null, }, - .@"opaque" => { - const opaque_obj = ty.cast(Payload.Opaque).?.data; - return opaque_obj.srcLoc(mod); + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), + else => null, }, - - else => return null, } } - pub fn getOwnerDecl(ty: Type) Module.Decl.Index { - return ty.getOwnerDeclOrNull() orelse unreachable; + pub fn getOwnerDecl(ty: Type, mod: *Module) Module.Decl.Index { + return ty.getOwnerDeclOrNull(mod) orelse unreachable; } - pub fn getOwnerDeclOrNull(ty: Type) ?Module.Decl.Index { - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.owner_decl; - }, - .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.owner_decl; - }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.owner_decl; - }, - .error_set => { - const error_set = ty.castTag(.error_set).?.data; - return error_set.owner_decl; - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.owner_decl; + pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .enum_full, .enum_nonexhaustive => { + const enum_full = ty.cast(Payload.EnumFull).?.data; + return enum_full.owner_decl; + }, + .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + return enum_simple.owner_decl; + }, + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + return struct_obj.owner_decl; + }, + .error_set => { + const error_set = ty.castTag(.error_set).?.data; + return error_set.owner_decl; + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + return union_obj.owner_decl; + }, + + else => return null, }, - .@"opaque" => { - const opaque_obj = ty.cast(Payload.Opaque).?.data; - return opaque_obj.owner_decl; + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .opaque_type => |opaque_type| opaque_type.decl, + else => null, }, - - else => return null, } } @@ -5022,7 +5014,6 @@ pub const Type = struct { error_set_inferred, error_set_merged, empty_struct, - @"opaque", @"struct", @"union", union_safety_tagged, @@ -5055,7 +5046,6 @@ pub const Type = struct { .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, - .@"opaque" => Payload.Opaque, .@"struct" => Payload.Struct, .@"union", .union_safety_tagged, .union_tagged => Payload.Union, .enum_full, .enum_nonexhaustive => Payload.EnumFull, @@ -5336,11 +5326,6 @@ pub const Type = struct { data: *Module.Namespace, }; - pub const Opaque = struct { - base: Payload = .{ .tag = .@"opaque" }, - data: *Module.Opaque, - }; - pub const Struct = struct { base: Payload = .{ .tag = .@"struct" }, data: *Module.Struct, From 8297f28546b44afe49bec074733f05e03a3c0e62 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 May 2023 12:16:24 -0700 Subject: [PATCH 057/205] stage2: move struct types and aggregate values to InternPool 
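
Module.Struct objects move into the InternPool, stored in an
`allocated_structs` SegmentedList and recycled through `structs_free_list`.
Struct types are interned with the new `type_struct` and `type_struct_ns`
tags, zero-field aggregate values with the new `only_possible_value` tag,
and most struct-related Type and Value queries now take a Module parameter
so they can reach the InternPool.

For illustration, the three encodings of the new `Key.StructType` look
roughly like this (`ns` and `s_index` are placeholder index values, not
code from this change):

    const empty_literal: Key.StructType = .{ .namespace = .none, .index = .none }; // `@TypeOf(.{})`
    const fieldless: Key.StructType = .{ .namespace = ns.toOptional(), .index = .none }; // e.g. `struct {}`
    const with_fields: Key.StructType = .{ .namespace = ns.toOptional(), .index = s_index.toOptional() };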
--- src/InternPool.zig | 296 +++++++-- src/Module.zig | 179 +++-- src/Sema.zig | 642 +++++++++--------- src/TypedValue.zig | 20 +- src/arch/aarch64/CodeGen.zig | 6 +- src/arch/aarch64/abi.zig | 12 +- src/arch/arm/CodeGen.zig | 6 +- src/arch/arm/abi.zig | 12 +- src/arch/riscv64/abi.zig | 4 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/wasm/CodeGen.zig | 31 +- src/arch/wasm/abi.zig | 18 +- src/arch/x86_64/CodeGen.zig | 34 +- src/arch/x86_64/abi.zig | 8 +- src/codegen.zig | 6 +- src/codegen/c.zig | 228 +++---- src/codegen/c/type.zig | 50 +- src/codegen/llvm.zig | 54 +- src/codegen/spirv.zig | 8 +- src/link/Dwarf.zig | 4 +- src/type.zig | 1183 +++++++++++++++++----------------- src/value.zig | 45 +- 22 files changed, 1570 insertions(+), 1280 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 3708e21ef6c2..315865c96619 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1,5 +1,10 @@ //! All interned objects have both a value and a type. +//! This data structure is self-contained, with the following exceptions: +//! * type_struct via Module.Struct.Index +//! * type_opaque via Module.Namespace.Index and Module.Decl.Index +/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are +/// constructed lazily. map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, @@ -9,6 +14,13 @@ extra: std.ArrayListUnmanaged(u32) = .{}, /// violate the above mechanism. limbs: std.ArrayListUnmanaged(u64) = .{}, +/// Struct objects are stored in this data structure because: +/// * They contain pointers such as the field maps. +/// * They need to be mutated after creation. +allocated_structs: std.SegmentedList(Module.Struct, 0) = .{}, +/// When a Struct object is freed from `allocated_structs`, it is pushed into this stack. +structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{}, + const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -17,8 +29,7 @@ const BigIntMutable = std.math.big.int.Mutable; const Limb = std.math.big.Limb; const InternPool = @This(); -const DeclIndex = @import("Module.zig").Decl.Index; -const NamespaceIndex = @import("Module.zig").Namespace.Index; +const Module = @import("Module.zig"); const KeyAdapter = struct { intern_pool: *const InternPool, @@ -45,11 +56,20 @@ pub const Key = union(enum) { payload_type: Index, }, simple_type: SimpleType, + /// If `empty_struct_type` is handled separately, then this value may be + /// safely assumed to never be `none`. + struct_type: StructType, + union_type: struct { + fields_len: u32, + // TODO move Module.Union data to InternPool + }, + opaque_type: OpaqueType, + simple_value: SimpleValue, extern_func: struct { ty: Index, /// The Decl that corresponds to the function itself. - decl: DeclIndex, + decl: Module.Decl.Index, /// Library name if specified. /// For example `extern "c" fn write(...) usize` would have 'c' as library name. /// Index into the string table bytes. @@ -62,13 +82,11 @@ pub const Key = union(enum) { ty: Index, tag: BigIntConst, }, - struct_type: StructType, - opaque_type: OpaqueType, - - union_type: struct { - fields_len: u32, - // TODO move Module.Union data to InternPool - }, + /// An instance of a struct, array, or vector. + /// Each element/field stored as an `Index`. + /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, + /// so the slice length will be one more than the type's array length. 
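+    /// For example, a value of type `[2:0]u8` stores 3 element `Index`es:
+    /// the two array elements followed by the zero sentinel.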
+    aggregate: Aggregate,
 
     pub const IntType = std.builtin.Type.Int;
 
@@ -113,16 +131,27 @@ pub const Key = union(enum) {
         child: Index,
     };
 
-    pub const StructType = struct {
-        fields_len: u32,
-        // TODO move Module.Struct data to InternPool
-    };
-
     pub const OpaqueType = struct {
         /// The Decl that corresponds to the opaque itself.
-        decl: DeclIndex,
+        decl: Module.Decl.Index,
         /// Represents the declarations inside this opaque.
-        namespace: NamespaceIndex,
+        namespace: Module.Namespace.Index,
+    };
+
+    /// There are three possibilities here:
+    /// * `@TypeOf(.{})` (untyped empty struct literal)
+    ///   - namespace == .none, index == .none
+    /// * A struct which has a namespace, but no fields.
+    ///   - index == .none
+    /// * A struct which has fields as well as a namespace.
+    pub const StructType = struct {
+        /// This will be `none` only in the case of `@TypeOf(.{})`
+        /// (`Index.empty_struct_type`).
+        namespace: Module.Namespace.OptionalIndex,
+        /// The `none` tag is used to represent two cases:
+        /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`.
+        /// * A struct with no fields, in which case `namespace` will be populated.
+        index: Module.Struct.OptionalIndex,
     };
 
     pub const Int = struct {
@@ -156,18 +185,24 @@ pub const Key = union(enum) {
         addr: Addr,
 
         pub const Addr = union(enum) {
-            decl: DeclIndex,
+            decl: Module.Decl.Index,
             int: Index,
         };
     };
 
     /// `null` is represented by the `val` field being `none`.
     pub const Opt = struct {
+        /// This is the optional type; not the payload type.
         ty: Index,
        /// This could be `none`, indicating the optional is `null`.
         val: Index,
     };
 
+    pub const Aggregate = struct {
+        ty: Index,
+        fields: []const Index,
+    };
+
     pub fn hash32(key: Key) u32 {
         return @truncate(u32, key.hash64());
     }
@@ -193,8 +228,15 @@ pub const Key = union(enum) {
             .simple_value,
             .extern_func,
             .opt,
+            .struct_type,
             => |info| std.hash.autoHash(hasher, info),
 
+            .union_type => |union_type| {
+                _ = union_type;
+                @panic("TODO");
+            },
+
+            .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
+
             .int => |int| {
                 // Canonicalize all integers by converting them to BigIntConst.
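                // This makes the hash independent of the storage representation,
                // so the same numeric value always maps to the same InternPool index.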
var buffer: Key.Int.Storage.BigIntSpace = undefined; @@ -221,16 +263,10 @@ pub const Key = union(enum) { for (enum_tag.tag.limbs) |limb| std.hash.autoHash(hasher, limb); }, - .struct_type => |struct_type| { - if (struct_type.fields_len != 0) { - @panic("TODO"); - } - }, - .union_type => |union_type| { - _ = union_type; - @panic("TODO"); + .aggregate => |aggregate| { + std.hash.autoHash(hasher, aggregate.ty); + for (aggregate.fields) |field| std.hash.autoHash(hasher, field); }, - .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), } } @@ -280,6 +316,10 @@ pub const Key = union(enum) { const b_info = b.opt; return std.meta.eql(a_info, b_info); }, + .struct_type => |a_info| { + const b_info = b.struct_type; + return std.meta.eql(a_info, b_info); + }, .ptr => |a_info| { const b_info = b.ptr; @@ -331,16 +371,6 @@ pub const Key = union(enum) { @panic("TODO"); }, - .struct_type => |a_info| { - const b_info = b.struct_type; - - // TODO: remove this special case for empty_struct - if (a_info.fields_len == 0 and b_info.fields_len == 0) - return true; - - @panic("TODO"); - }, - .union_type => |a_info| { const b_info = b.union_type; @@ -353,6 +383,11 @@ pub const Key = union(enum) { const b_info = b.opaque_type; return a_info.decl == b_info.decl; }, + .aggregate => |a_info| { + const b_info = b.aggregate; + if (a_info.ty != b_info.ty) return false; + return std.mem.eql(Index, a_info.fields, b_info.fields); + }, } } @@ -375,6 +410,7 @@ pub const Key = union(enum) { .opt, .extern_func, .enum_tag, + .aggregate, => |x| return x.ty, .simple_value => |s| switch (s) { @@ -471,6 +507,7 @@ pub const Index = enum(u32) { anyerror_void_error_union_type, generic_poison_type, var_args_param_type, + /// `@TypeOf(.{})` empty_struct_type, /// `undefined` (untyped) @@ -691,7 +728,8 @@ pub const static_keys = [_]Key{ // empty_struct_type .{ .struct_type = .{ - .fields_len = 0, + .namespace = .none, + .index = .none, } }, .{ .simple_value = .undefined }, @@ -792,16 +830,18 @@ pub const Tag = enum(u8) { /// An opaque type. /// data is index of Key.OpaqueType in extra. type_opaque, + /// A struct type. + /// data is Module.Struct.OptionalIndex + /// The `none` tag is used to represent `@TypeOf(.{})`. + type_struct, + /// A struct type that has only a namespace; no fields, and there is no + /// Module.Struct object allocated for it. + /// data is Module.Namespace.Index. + type_struct_ns, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// The SimpleType and SimpleValue enums are exposed via the InternPool API using - /// SimpleType and SimpleValue as the Key data themselves. - /// This tag is for miscellaneous types and values that can be represented with - /// only an enum tag, but will be presented via the API with a different Key. - /// data is SimpleInternal enum value. - simple_internal, /// A pointer to an integer value. /// data is extra index of PtrInt, which contains the type and address. /// Only pointer types are allowed to have this encoding. Optional types must use @@ -809,6 +849,8 @@ pub const Tag = enum(u8) { ptr_int, /// An optional value that is non-null. /// data is Index of the payload value. + /// In order to use this encoding, one must ensure that the `InternPool` + /// already contains the optional type corresponding to this payload. opt_payload, /// An optional value that is null. /// data is Index of the payload type. @@ -859,6 +901,13 @@ pub const Tag = enum(u8) { extern_func, /// A regular function. 
func, + /// This represents the only possible value for *some* types which have + /// only one possible value. Not all only-possible-values are encoded this way; + /// for example structs which have all comptime fields are not encoded this way. + /// The set of values that are encoded this way is: + /// * A struct which has 0 fields. + /// data is Index of the type, which is known to be zero bits at runtime. + only_possible_value, }; /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to @@ -912,9 +961,12 @@ pub const SimpleType = enum(u32) { }; pub const SimpleValue = enum(u32) { + /// This is untyped `undefined`. undefined, void, + /// This is untyped `null`. null, + /// This is the untyped empty struct literal: `.{}` empty_struct, true, false, @@ -923,12 +975,6 @@ pub const SimpleValue = enum(u32) { generic_poison, }; -pub const SimpleInternal = enum(u32) { - /// This is the empty struct type. Note that empty_struct value is exposed - /// via SimpleValue. - type_empty_struct, -}; - pub const Pointer = struct { child: Index, sentinel: Index, @@ -1005,7 +1051,7 @@ pub const ErrorUnion = struct { /// 0. field name: null-terminated string index for each fields_len; declaration order pub const EnumSimple = struct { /// The Decl that corresponds to the enum itself. - decl: DeclIndex, + decl: Module.Decl.Index, /// An integer type which is used for the numerical value of the enum. This /// is inferred by Zig to be the smallest power of two unsigned int that /// fits the number of fields. It is stored here to avoid unnecessary @@ -1091,6 +1137,10 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); + + ip.structs_free_list.deinit(gpa); + ip.allocated_structs.deinit(gpa); + ip.* = undefined; } @@ -1167,20 +1217,38 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_enum_simple => @panic("TODO"), .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, - - .simple_internal => switch (@intToEnum(SimpleInternal, data)) { - .type_empty_struct => .{ .struct_type = .{ - .fields_len = 0, - } }, + .type_struct => { + const struct_index = @intToEnum(Module.Struct.OptionalIndex, data); + const namespace = if (struct_index.unwrap()) |i| + ip.structPtrConst(i).namespace.toOptional() + else + .none; + return .{ .struct_type = .{ + .index = struct_index, + .namespace = namespace, + } }; }, + .type_struct_ns => .{ .struct_type = .{ + .index = .none, + .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(), + } }, + .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, } }, - .opt_payload => .{ .opt = .{ - .ty = indexToKey(ip, @intToEnum(Index, data)).typeOf(), - .val = @intToEnum(Index, data), - } }, + .opt_payload => { + const payload_val = @intToEnum(Index, data); + // The existence of `opt_payload` guarantees that the optional type will be + // stored in the `InternPool`. 
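+            // `get` asserts `isOptionalType(opt.ty)` before it emits `opt_null`
+            // or `opt_payload`, so the `getAssumeExists` lookup below cannot fail.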
+ const opt_ty = ip.getAssumeExists(.{ + .opt_type = indexToKey(ip, payload_val).typeOf(), + }); + return .{ .opt = .{ + .ty = opt_ty, + .val = payload_val, + } }; + }, .ptr_int => { const info = ip.extraData(PtrInt, data); return .{ .ptr = .{ @@ -1225,6 +1293,16 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .float_f128 => @panic("TODO"), .extern_func => @panic("TODO"), .func => @panic("TODO"), + .only_possible_value => { + const ty = @intToEnum(Index, data); + return switch (ip.indexToKey(ty)) { + .struct_type => .{ .aggregate = .{ + .ty = ty, + .fields = &.{}, + } }, + else => unreachable, + }; + }, }; } @@ -1359,12 +1437,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .struct_type => |struct_type| { - if (struct_type.fields_len != 0) { - @panic("TODO"); // handle structs other than empty_struct - } - ip.items.appendAssumeCapacity(.{ - .tag = .simple_internal, - .data = @enumToInt(SimpleInternal.type_empty_struct), + ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ + .tag = .type_struct, + .data = @enumToInt(i), + } else if (struct_type.namespace.unwrap()) |i| .{ + .tag = .type_struct_ns, + .data = @enumToInt(i), + } else .{ + .tag = .type_struct, + .data = @enumToInt(Module.Struct.OptionalIndex.none), }); }, @@ -1398,6 +1479,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .opt => |opt| { assert(opt.ty != .none); + assert(ip.isOptionalType(opt.ty)); ip.items.appendAssumeCapacity(if (opt.val == .none) .{ .tag = .opt_null, .data = @enumToInt(opt.ty), @@ -1549,10 +1631,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative; try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs); }, + + .aggregate => |aggregate| { + if (aggregate.fields.len == 0) { + ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(aggregate.ty), + }); + return @intToEnum(Index, ip.items.len - 1); + } + @panic("TODO"); + }, } return @intToEnum(Index, ip.items.len - 1); } +pub fn getAssumeExists(ip: InternPool, key: Key) Index { + const adapter: KeyAdapter = .{ .intern_pool = &ip }; + const index = ip.map.getIndexAdapted(key, adapter).?; + return @intToEnum(Index, index); +} + +/// This operation only happens under compile error conditions. +/// Leak the index until the next garbage collection. 
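+/// Callers pair it with `errdefer`, e.g. `errdefer mod.intern_pool.remove(struct_ty)`
+/// after interning a type whose remaining initialization can still fail.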
+pub fn remove(ip: *InternPool, index: Index) void { + _ = ip; + _ = index; + @panic("TODO this is a bit problematic to implement, could we maybe just never support a remove() operation on InternPool?"); +} + fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { const limbs_len = @intCast(u32, limbs.len); try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); @@ -1578,8 +1685,8 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Index => @enumToInt(@field(extra, field.name)), - DeclIndex => @enumToInt(@field(extra, field.name)), - NamespaceIndex => @enumToInt(@field(extra, field.name)), + Module.Decl.Index => @enumToInt(@field(extra, field.name)), + Module.Namespace.Index => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), @@ -1635,8 +1742,8 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T { @field(result, field.name) = switch (field.type) { u32 => int32, Index => @intToEnum(Index, int32), - DeclIndex => @intToEnum(DeclIndex, int32), - NamespaceIndex => @intToEnum(NamespaceIndex, int32), + Module.Decl.Index => @intToEnum(Module.Decl.Index, int32), + Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), @@ -1808,6 +1915,20 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al } } +pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex { + const tags = ip.items.items(.tag); + if (val == .none) return .none; + if (tags[@enumToInt(val)] != .type_struct) return .none; + const datas = ip.items.items(.data); + return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); +} + +pub fn isOptionalType(ip: InternPool, ty: Index) bool { + const tags = ip.items.items(.tag); + if (ty == .none) return false; + return tags[@enumToInt(ty)] == .type_optional; +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } @@ -1859,9 +1980,10 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_error_union => @sizeOf(ErrorUnion), .type_enum_simple => @sizeOf(EnumSimple), .type_opaque => @sizeOf(Key.OpaqueType), + .type_struct => 0, + .type_struct_ns => 0, .simple_type => 0, .simple_value => 0, - .simple_internal => 0, .ptr_int => @sizeOf(PtrInt), .opt_null => 0, .opt_payload => 0, @@ -1887,6 +2009,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .float_f128 => @sizeOf(Float128), .extern_func => @panic("TODO"), .func => @panic("TODO"), + .only_possible_value => 0, }); } const SortContext = struct { @@ -1905,3 +2028,34 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { }); } } + +pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct { + return ip.allocated_structs.at(@enumToInt(index)); +} + +pub fn structPtrConst(ip: InternPool, index: Module.Struct.Index) *const Module.Struct { + return ip.allocated_structs.at(@enumToInt(index)); +} + +pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct { + return structPtrConst(ip, index.unwrap() orelse return null); +} + +pub fn createStruct( + ip: 
*InternPool,
+    gpa: Allocator,
+    initialization: Module.Struct,
+) Allocator.Error!Module.Struct.Index {
+    if (ip.structs_free_list.popOrNull()) |index| {
+        // Reuse a previously freed slot. It must be reinitialized here, since
+        // `destroyStruct` sets the slot to undefined.
+        ip.structPtr(index).* = initialization;
+        return index;
+    }
+    const ptr = try ip.allocated_structs.addOne(gpa);
+    ptr.* = initialization;
+    return @intToEnum(Module.Struct.Index, ip.allocated_structs.len - 1);
+}
+
+pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void {
+    ip.structPtr(index).* = undefined;
+    ip.structs_free_list.append(gpa, index) catch {
+        // In order to keep `destroyStruct` a non-fallible function, we ignore memory
+        // allocation failures here, instead leaking the Struct until garbage collection.
+    };
+}
diff --git a/src/Module.zig b/src/Module.zig
index 7521d4d43928..ada69537f6d9 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -839,11 +839,14 @@ pub const Decl = struct {
 
     /// If the Decl has a value and it is a struct, return it,
     /// otherwise null.
-    pub fn getStruct(decl: *Decl) ?*Struct {
-        if (!decl.owns_tv) return null;
-        const ty = (decl.val.castTag(.ty) orelse return null).data;
-        const struct_obj = (ty.castTag(.@"struct") orelse return null).data;
-        return struct_obj;
+    pub fn getStruct(decl: *Decl, mod: *Module) ?*Struct {
+        return mod.structPtrUnwrap(getStructIndex(decl, mod));
+    }
+
+    pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex {
+        if (!decl.owns_tv) return .none;
+        const ty = (decl.val.castTag(.ty) orelse return .none).data;
+        return mod.intern_pool.indexToStruct(ty.ip_index);
     }
 
     /// If the Decl has a value and it is a union, return it,
@@ -884,32 +887,29 @@ pub const Decl = struct {
     /// Only returns it if the Decl is the owner.
     pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex {
         if (!decl.owns_tv) return .none;
-        if (decl.val.ip_index == .none) {
-            const ty = (decl.val.castTag(.ty) orelse return .none).data;
-            switch (ty.tag()) {
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    return struct_obj.namespace.toOptional();
-                },
-                .enum_full, .enum_nonexhaustive => {
-                    const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
-                    return enum_obj.namespace.toOptional();
-                },
-                .empty_struct => {
-                    @panic("TODO");
-                },
-                .@"union", .union_safety_tagged, .union_tagged => {
-                    const union_obj = ty.cast(Type.Payload.Union).?.data;
-                    return union_obj.namespace.toOptional();
-                },
+        switch (decl.val.ip_index) {
+            .empty_struct_type => return .none,
+            .none => {
+                const ty = (decl.val.castTag(.ty) orelse return .none).data;
+                switch (ty.tag()) {
+                    .enum_full, .enum_nonexhaustive => {
+                        const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
+                        return enum_obj.namespace.toOptional();
+                    },
+                    .@"union", .union_safety_tagged, .union_tagged => {
+                        const union_obj = ty.cast(Type.Payload.Union).?.data;
+                        return union_obj.namespace.toOptional();
+                    },
 
-                else => return .none,
-            }
+                    else => return .none,
+                }
+            },
+            else => return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+                .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
+                .struct_type => |struct_type| struct_type.namespace,
+                else => .none,
+            },
         }
-        return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
-            .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
-            else => .none,
-        };
     }
 
     /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. 
@@ -1046,6 +1046,28 @@ pub const Struct = struct { is_tuple: bool, assumed_runtime_bits: bool = false, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + pub const Fields = std.StringArrayHashMapUnmanaged(Field); /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl. @@ -1111,12 +1133,7 @@ pub const Struct = struct { } pub fn srcLoc(s: Struct, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(s.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; + return mod.declPtr(s.owner_decl).srcLoc(mod); } pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc { @@ -3622,6 +3639,16 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { return mod.allocated_namespaces.at(@enumToInt(index)); } +pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { + return mod.intern_pool.structPtr(index); +} + +/// This one accepts an index from the InternPool and asserts that it is not +/// the anonymous empty struct type. +pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { + return structPtr(mod, index.unwrap() orelse return null); +} + /// Returns true if and only if the Decl is the top level struct associated with a File. pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { const decl = mod.declPtr(decl_index); @@ -4078,7 +4105,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { if (!decl.owns_tv) continue; - if (decl.getStruct()) |struct_obj| { + if (decl.getStruct(mod)) |struct_obj| { struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; @@ -4597,36 +4624,50 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const ty_ty = comptime Type.type; - struct_obj.* = .{ - .owner_decl = undefined, // set below + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. 
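+    // Creation order is: namespace, decl, struct, then the interned struct
+    // type; each step registers an errdefer (the decl's is still a TODO panic)
+    // so a failure unwinds whatever was created before it.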
+ const new_namespace_index = try mod.createNamespace(.{ + .parent = .none, + .ty = undefined, + .file_scope = file, + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, null); + const new_decl = mod.declPtr(new_decl_index); + errdefer @panic("TODO error handling"); + + const struct_index = try mod.createStruct(.{ + .owner_decl = new_decl_index, .fields = .{}, .zir_index = undefined, // set below .layout = .Auto, .status = .none, .known_non_opv = undefined, .is_tuple = undefined, // set below - .namespace = try mod.createNamespace(.{ - .parent = .none, - .ty = struct_ty, - .file_scope = file, - }), - }; - const new_decl_index = try mod.allocateNewDecl(struct_obj.namespace, 0, null); - const new_decl = mod.declPtr(new_decl_index); + .namespace = new_namespace_index, + }); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + errdefer mod.intern_pool.remove(struct_ty); + + new_namespace.ty = struct_ty.toType(); file.root_decl = new_decl_index.toOptional(); - struct_obj.owner_decl = new_decl_index; + new_decl.name = try file.fullyQualifiedNameZ(gpa); new_decl.src_line = 0; new_decl.is_pub = true; new_decl.is_exported = false; new_decl.has_align = false; new_decl.has_linksection_or_addrspace = false; - new_decl.ty = ty_ty; - new_decl.val = struct_val; + new_decl.ty = Type.type; + new_decl.val = struct_ty.toValue(); new_decl.@"align" = 0; new_decl.@"linksection" = null; new_decl.has_tv = true; @@ -4639,6 +4680,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (file.status == .success_zir) { assert(file.zir_loaded); const main_struct_inst = Zir.main_struct_inst; + const struct_obj = mod.structPtr(struct_index); struct_obj.zir_index = main_struct_inst; const extended = file.zir.instructions.items(.data)[main_struct_inst].extended; const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -4665,7 +4707,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null); defer wip_captures.deinit(); - if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| { + if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| { try wip_captures.finalize(); new_decl.analysis = .complete; } else |err| switch (err) { @@ -4761,11 +4803,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (mod.declIsRoot(decl_index)) { log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; - const struct_obj = decl.getStruct().?; + const struct_index = decl.getStructIndex(mod).unwrap().?; + const struct_obj = mod.structPtr(struct_index); // This might not have gotten set in `semaFile` if the first time had // a ZIR failure, so we set it here in case. 
struct_obj.zir_index = main_struct_inst; - try sema.analyzeStructDecl(decl, main_struct_inst, struct_obj); + try sema.analyzeStructDecl(decl, main_struct_inst, struct_index); decl.analysis = .complete; decl.generation = mod.generation; return false; @@ -5970,6 +6013,14 @@ pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { }; } +pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index { + return mod.intern_pool.createStruct(mod.gpa, initialization); +} + +pub fn destroyStruct(mod: *Module, index: Struct.Index) void { + return mod.intern_pool.destroyStruct(mod.gpa, index); +} + pub fn allocateNewDecl( mod: *Module, namespace: Namespace.Index, @@ -7202,12 +7253,7 @@ pub fn atomicPtrAlignment( } pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc { - const owner_decl = mod.declPtr(opaque_type.decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; + return mod.declPtr(opaque_type.decl).srcLoc(mod); } pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) ![:0]u8 { @@ -7221,3 +7267,12 @@ pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File { pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index { return mod.namespacePtr(namespace_index).getDeclIndex(mod); } + +/// Returns null in the following cases: +/// * `@TypeOf(.{})` +/// * A struct which has no fields (`struct {}`). +/// * Not a struct. +pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { + const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null; + return mod.structPtr(struct_index); +} diff --git a/src/Sema.zig b/src/Sema.zig index 35440395c431..1f72470f9eb3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2090,16 +2090,17 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: } fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError { + const mod = sema.mod; const msg = msg: { const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{}); errdefer msg.destroy(sema.gpa); - const struct_ty = container_ty.castTag(.@"struct") orelse break :msg msg; - const default_value_src = struct_ty.data.fieldSrcLoc(sema.mod, .{ + const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg; + const default_value_src = struct_ty.fieldSrcLoc(mod, .{ .index = field_index, .range = .value, }); - try sema.mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{}); + try mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -2632,8 +2633,10 @@ pub fn analyzeStructDecl( sema: *Sema, new_decl: *Decl, inst: Zir.Inst.Index, - struct_obj: *Module.Struct, + struct_index: Module.Struct.Index, ) SemaError!void { + const mod = sema.mod; + const struct_obj = mod.structPtr(struct_index); const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -2662,7 +2665,7 @@ pub fn analyzeStructDecl( } } - _ = try sema.mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl); + _ = try mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl); } fn zirStructDecl( @@ 
-2671,28 +2674,38 @@ fn zirStructDecl( extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const mod = sema.mod; - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = struct_val, + .val = undefined, }, small.name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - struct_obj.* = .{ + + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const struct_index = try mod.createStruct(.{ .owner_decl = new_decl_index, .fields = .{}, .zir_index = inst, @@ -2700,13 +2713,20 @@ fn zirStructDecl( .status = .none, .known_non_opv = undefined, .is_tuple = small.is_tuple, - .namespace = try mod.createNamespace(.{ - .parent = block.namespace.toOptional(), - .ty = struct_ty, - .file_scope = block.getFileScope(mod), - }), - }; - try sema.analyzeStructDecl(new_decl, inst, struct_obj); + .namespace = new_namespace_index, + }); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + errdefer mod.intern_pool.remove(struct_ty); + + new_decl.val = struct_ty.toValue(); + new_namespace.ty = struct_ty.toType(); + + try sema.analyzeStructDecl(new_decl, inst, struct_index); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); } @@ -2721,6 +2741,7 @@ fn createAnonymousDeclTypeNamed( inst: ?Zir.Inst.Index, ) !Decl.Index { const mod = sema.mod; + const gpa = sema.gpa; const namespace = block.namespace; const src_scope = block.wip_capture_scope; const src_decl = mod.declPtr(block.src_decl); @@ -2736,16 +2757,16 @@ fn createAnonymousDeclTypeNamed( // semantically analyzed. // This name is also used as the key in the parent namespace so it cannot be // renamed. 
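            // The generated name has the form `{parent}__{prefix}_{decl index}`;
            // a hypothetical example: `main__struct_42`.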
- const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__{s}_{d}", .{ + const name = try std.fmt.allocPrintZ(gpa, "{s}__{s}_{d}", .{ src_decl.name, anon_prefix, @enumToInt(new_decl_index), }); - errdefer sema.gpa.free(name); + errdefer gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, .parent => { - const name = try sema.gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); - errdefer sema.gpa.free(name); + const name = try gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); + errdefer gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2753,7 +2774,7 @@ fn createAnonymousDeclTypeNamed( const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst); const zir_tags = sema.code.instructions.items(.tag); - var buf = std.ArrayList(u8).init(sema.gpa); + var buf = std.ArrayList(u8).init(gpa); defer buf.deinit(); try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); try buf.appendSlice("("); @@ -2781,7 +2802,7 @@ fn createAnonymousDeclTypeNamed( try buf.appendSlice(")"); const name = try buf.toOwnedSliceSentinel(0); - errdefer sema.gpa.free(name); + errdefer gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2794,10 +2815,10 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - const name = try std.fmt.allocPrintZ(sema.gpa, "{s}.{s}", .{ + const name = try std.fmt.allocPrintZ(gpa, "{s}.{s}", .{ src_decl.name, zir_data[i].str_op.getStr(sema.code), }); - errdefer sema.gpa.free(name); + errdefer gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; @@ -3216,13 +3237,13 @@ fn zirOpaqueDecl( .file_scope = block.getFileScope(mod), }); const new_namespace = mod.namespacePtr(new_namespace_index); - errdefer @panic("TODO error handling"); + errdefer mod.destroyNamespace(new_namespace_index); const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ .decl = new_decl_index, .namespace = new_namespace_index, } }); - errdefer @panic("TODO error handling"); + errdefer mod.intern_pool.remove(opaque_ty); new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); @@ -3960,7 +3981,7 @@ fn zirArrayBasePtr( const elem_ty = sema.typeOf(base_ptr).childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, - .Struct => if (elem_ty.isTuple()) { + .Struct => if (elem_ty.isTuple(mod)) { // TODO validate element count return base_ptr; }, @@ -4150,7 +4171,7 @@ fn validateArrayInitTy( } return; }, - .Struct => if (ty.isTuple()) { + .Struct => if (ty.isTuple(mod)) { _ = try sema.resolveTypeFields(ty); const array_len = ty.arrayLen(mod); if (extra.init_count > array_len) { @@ -4358,7 +4379,7 @@ fn validateStructInit( const gpa = sema.gpa; // Maps field index to field_ptr index of where it was already initialized. 
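    // A slot of 0 means "not yet initialized": the array is zeroed with
    // @memset below, and entries are only consulted when nonzero.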
- const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount()); + const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod)); defer gpa.free(found_fields); @memset(found_fields, 0); @@ -4370,7 +4391,7 @@ fn validateStructInit( const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; struct_ptr_zir_ref = field_ptr_extra.lhs; const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); - const field_index = if (struct_ty.isTuple()) + const field_index = if (struct_ty.isTuple(mod)) try sema.tupleFieldIndex(block, struct_ty, field_name, field_src) else try sema.structFieldIndex(block, struct_ty, field_name, field_src); @@ -4403,9 +4424,9 @@ fn validateStructInit( for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) continue; - const default_val = struct_ty.structFieldDefaultValue(i); + const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.ip_index == .unreachable_value) { - if (struct_ty.isTuple()) { + if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4414,7 +4435,7 @@ fn validateStructInit( } continue; } - const field_name = struct_ty.structFieldName(i); + const field_name = struct_ty.structFieldName(i, mod); const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { @@ -4426,7 +4447,7 @@ fn validateStructInit( } const field_src = init_src; // TODO better source location - const default_field_ptr = if (struct_ty.isTuple()) + const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); @@ -4436,11 +4457,11 @@ fn validateStructInit( } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(mod); + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(mod); defer gpa.free(fqn); try mod.errNoteNonLazy( - struct_obj.data.srcLoc(mod), + struct_obj.srcLoc(mod), msg, "struct '{s}' declared here", .{fqn}, @@ -4463,12 +4484,12 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount()); + const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount(mod)); field: for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. 
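            // If it is, the value is recorded in `field_values`; when every
            // field is comptime-known, the whole initialization can be treated
            // as a comptime value.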
- const field_ty = struct_ty.structFieldType(i); + const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { field_values[i] = opv; continue; @@ -4548,9 +4569,9 @@ fn validateStructInit( continue :field; } - const default_val = struct_ty.structFieldDefaultValue(i); + const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.ip_index == .unreachable_value) { - if (struct_ty.isTuple()) { + if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4559,7 +4580,7 @@ fn validateStructInit( } continue; } - const field_name = struct_ty.structFieldName(i); + const field_name = struct_ty.structFieldName(i, mod); const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { @@ -4573,11 +4594,11 @@ fn validateStructInit( } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod); + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(sema.mod); defer gpa.free(fqn); try sema.mod.errNoteNonLazy( - struct_obj.data.srcLoc(sema.mod), + struct_obj.srcLoc(sema.mod), msg, "struct '{s}' declared here", .{fqn}, @@ -4605,7 +4626,7 @@ fn validateStructInit( if (field_ptr != 0) continue; const field_src = init_src; // TODO better source location - const default_field_ptr = if (struct_ty.isTuple()) + const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); @@ -4638,7 +4659,7 @@ fn zirValidateArrayInit( var i = instrs.len; while (i < array_len) : (i += 1) { - const default_val = array_ty.structFieldDefaultValue(i); + const default_val = array_ty.structFieldDefaultValue(i, mod); if (default_val.ip_index == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4698,7 +4719,7 @@ fn zirValidateArrayInit( outer: for (instrs, 0..) |elem_ptr, i| { // Determine whether the value stored to this pointer is comptime-known. 
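        // Tuple elements are checked individually, since each tuple field may
        // carry its own comptime-known value.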
- if (array_ty.isTuple()) { + if (array_ty.isTuple(mod)) { if (try array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv; continue; @@ -7950,7 +7971,7 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs); assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction if (indexable_ty.zigTypeTag(mod) == .Struct) { - const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs)); + const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs), mod); return sema.addType(elem_type); } else { const elem_type = indexable_ty.elemType2(mod); @@ -9822,7 +9843,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }, - .Struct, .Union => if (dest_ty.containerLayout() == .Auto) { + .Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) { const container = switch (dest_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", @@ -9885,7 +9906,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }, - .Struct, .Union => if (operand_ty.containerLayout() == .Auto) { + .Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) { const container = switch (operand_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", @@ -12041,12 +12062,12 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (mem.eql(u8, name, field_name)) break true; } else false; } - if (ty.isTuple()) { + if (ty.isTuple(mod)) { const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; - break :hf field_index < ty.structFieldCount(); + break :hf field_index < ty.structFieldCount(mod); } break :hf switch (ty.zigTypeTag(mod)) { - .Struct => ty.structFields().contains(field_name), + .Struct => ty.structFields(mod).contains(field_name), .Union => ty.unionFields().contains(field_name), .Enum => ty.enumFields().contains(field_name), .Array => mem.eql(u8, field_name, "len"), @@ -12601,14 +12622,15 @@ fn analyzeTupleCat( lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const src = LazySrcLoc.nodeOffset(src_node); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node }; - const lhs_len = lhs_ty.structFieldCount(); - const rhs_len = rhs_ty.structFieldCount(); + const lhs_len = lhs_ty.structFieldCount(mod); + const rhs_len = rhs_ty.structFieldCount(mod); const dest_fields = lhs_len + rhs_len; if (dest_fields == 0) { @@ -12629,8 +12651,8 @@ fn analyzeTupleCat( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i); - const default_val = lhs_ty.structFieldDefaultValue(i); + types[i] = lhs_ty.structFieldType(i, mod); + const default_val = lhs_ty.structFieldDefaultValue(i, mod); values[i] = default_val; const operand_src = lhs_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { @@ -12639,8 +12661,8 @@ fn analyzeTupleCat( } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i); - const default_val = rhs_ty.structFieldDefaultValue(i); + types[i + lhs_len] = rhs_ty.structFieldType(i, mod); + const default_val = rhs_ty.structFieldDefaultValue(i, mod); values[i + 
lhs_len] = default_val; const operand_src = rhs_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { @@ -12691,8 +12713,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs_ty = sema.typeOf(rhs); const src = inst_data.src(); - const lhs_is_tuple = lhs_ty.isTuple(); - const rhs_is_tuple = rhs_ty.isTuple(); + const lhs_is_tuple = lhs_ty.isTuple(mod); + const rhs_is_tuple = rhs_ty.isTuple(mod); if (lhs_is_tuple and rhs_is_tuple) { return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs); } @@ -12800,8 +12822,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var elem_i: usize = 0; while (elem_i < lhs_len) : (elem_i += 1) { const lhs_elem_i = elem_i; - const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type; - const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.@"unreachable"; + const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type; + const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable"; const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); @@ -12810,8 +12832,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; - const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type; - const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.@"unreachable"; + const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type; + const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable"; const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); @@ -12909,8 +12931,8 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins } }, .Struct => { - if (operand_ty.isTuple() and peer_ty.isIndexable(mod)) { - assert(!peer_ty.isTuple()); + if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) { + assert(!peer_ty.isTuple(mod)); return .{ .elem_type = peer_ty.elemType2(mod), .sentinel = null, @@ -12930,12 +12952,13 @@ fn analyzeTupleMul( operand: Air.Inst.Ref, factor: u64, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); const src = LazySrcLoc.nodeOffset(src_node); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node }; - const tuple_len = operand_ty.structFieldCount(); + const tuple_len = operand_ty.structFieldCount(mod); const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); @@ -12951,8 +12974,8 @@ fn analyzeTupleMul( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < tuple_len) : (i += 1) { - types[i] = 
operand_ty.structFieldType(i); - values[i] = operand_ty.structFieldDefaultValue(i); + types[i] = operand_ty.structFieldType(i, mod); + values[i] = operand_ty.structFieldDefaultValue(i, mod); const operand_src = lhs_src; // TODO better source location if (values[i].ip_index == .unreachable_value) { runtime_src = operand_src; @@ -13006,7 +13029,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const operator_src: LazySrcLoc = .{ .node_offset_main_token = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - if (lhs_ty.isTuple()) { + if (lhs_ty.isTuple(mod)) { // In `**` rhs must be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known"); return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor); @@ -14502,7 +14525,7 @@ fn zirOverflowArithmetic( const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2); element_refs[0] = result.inst; - element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1), result.overflow_bit); + element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1, mod), result.overflow_bit); return block.addAggregateInit(tuple_ty, element_refs); } @@ -16378,7 +16401,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const union_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = union_ty.containerLayout(); + const layout = union_ty.containerLayout(mod); const union_fields = union_ty.unionFields(); const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count()); @@ -16484,7 +16507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = struct_ty.containerLayout(); + const layout = struct_ty.containerLayout(mod); const struct_field_vals = fv: { if (struct_ty.isSimpleTupleOrAnonStruct()) { @@ -16532,7 +16555,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } break :fv struct_field_vals; } - const struct_fields = struct_ty.structFields(); + const struct_fields = struct_ty.structFields(mod); const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count()); for (struct_field_vals, 0..) |*field_val, i| { @@ -16600,7 +16623,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const backing_integer_val = blk: { if (layout == .Packed) { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty); @@ -16624,7 +16647,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple()), + Value.makeBool(struct_ty.isTuple(mod)), }; return sema.addConstant( @@ -17801,12 +17824,13 @@ fn structInitEmpty( dest_src: LazySrcLoc, init_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. 
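    // Both paths resolve the struct's fields, allocate one init slot per
    // field, and then fill in default values for every field left unset.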
const struct_ty = try sema.resolveTypeFields(obj_ty); // The init values to use for the struct instance. - const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount()); + const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); @@ -17897,18 +17921,18 @@ fn zirStructInit( // Maps field index to field_type index of where it was already initialized. // For making sure all fields are accounted for and no fields are duplicated. - const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount()); + const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod)); defer gpa.free(found_fields); // The init values to use for the struct instance. - const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount()); + const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); var field_i: u32 = 0; var extra_index = extra.end; - const is_packed = resolved_ty.containerLayout() == .Packed; + const is_packed = resolved_ty.containerLayout(mod) == .Packed; while (field_i < extra.data.fields_len) : (field_i += 1) { const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index); extra_index = item.end; @@ -17917,7 +17941,7 @@ fn zirStructInit( const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); - const field_index = if (resolved_ty.isTuple()) + const field_index = if (resolved_ty.isTuple(mod)) try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src) else try sema.structFieldIndex(block, resolved_ty, field_name, field_src); @@ -17940,7 +17964,7 @@ fn zirStructInit( return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index), sema.mod)) { + if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index); } }; @@ -18029,13 +18053,13 @@ fn finishStructInit( field_inits[i] = try sema.addConstant(struct_obj.types[i], default_val); } } - } else if (struct_ty.isTuple()) { + } else if (struct_ty.isTuple(mod)) { var i: u32 = 0; - const len = struct_ty.structFieldCount(); + const len = struct_ty.structFieldCount(mod); while (i < len) : (i += 1) { if (field_inits[i] != .none) continue; - const default_val = struct_ty.structFieldDefaultValue(i); + const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.ip_index == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -18044,11 +18068,11 @@ fn finishStructInit( root_msg = try sema.errMsg(block, init_src, template, .{i}); } } else { - field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i), default_val); + field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i, mod), default_val); } } } else { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; for (struct_obj.fields.values(), 0..) 
|field, i| { if (field_inits[i] != .none) continue; @@ -18068,11 +18092,11 @@ fn finishStructInit( } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod); + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(sema.mod); defer gpa.free(fqn); try sema.mod.errNoteNonLazy( - struct_obj.data.srcLoc(sema.mod), + struct_obj.srcLoc(sema.mod), msg, "struct '{s}' declared here", .{fqn}, @@ -18277,7 +18301,7 @@ fn zirArrayInit( for (args[1..], 0..) |arg, i| { const resolved_arg = try sema.resolveInst(arg); const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) - array_ty.structFieldType(i) + array_ty.structFieldType(i, mod) else array_ty.elemType2(mod); resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { @@ -18331,12 +18355,12 @@ fn zirArrayInit( }); const alloc = try block.addTy(.alloc, alloc_ty); - if (array_ty.isTuple()) { + if (array_ty.isTuple(mod)) { for (resolved_args, 0..) |arg, i| { const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.structFieldType(i), + .pointee_type = array_ty.structFieldType(i, mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18514,7 +18538,7 @@ fn fieldType( const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); return sema.addType(cur_ty.tupleFields().types[field_index]); } - const struct_obj = cur_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(cur_ty).?; const field = struct_obj.fields.get(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); return sema.addType(field.ty); @@ -19185,13 +19209,13 @@ fn zirReify( .file_scope = block.getFileScope(mod), }); const new_namespace = mod.namespacePtr(new_namespace_index); - errdefer @panic("TODO error handling"); + errdefer mod.destroyNamespace(new_namespace_index); const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ .decl = new_decl_index, .namespace = new_namespace_index, } }); - errdefer @panic("TODO error handling"); + errdefer mod.intern_pool.remove(opaque_ty); new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); @@ -19493,22 +19517,34 @@ fn reifyStruct( name_strategy: Zir.Inst.NameStrategy, is_tuple: bool, ) CompileError!Air.Inst.Ref { - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + const mod = sema.mod; + const gpa = sema.gpa; + + var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const new_struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const mod = sema.mod; + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. 
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = new_struct_val, + .val = undefined, }, name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - struct_obj.* = .{ + + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const struct_index = try mod.createStruct(.{ .owner_decl = new_decl_index, .fields = .{}, .zir_index = inst, @@ -19516,12 +19552,19 @@ fn reifyStruct( .status = .have_field_types, .known_non_opv = false, .is_tuple = is_tuple, - .namespace = try mod.createNamespace(.{ - .parent = block.namespace.toOptional(), - .ty = struct_ty, - .file_scope = block.getFileScope(mod), - }), - }; + .namespace = new_namespace_index, + }); + const struct_obj = mod.structPtr(struct_index); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + errdefer mod.intern_pool.remove(struct_ty); + + new_decl.val = struct_ty.toValue(); + new_namespace.ty = struct_ty.toType(); // Fields const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); @@ -19609,7 +19652,7 @@ fn reifyStruct( if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19619,7 +19662,7 @@ fn reifyStruct( if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19629,7 +19672,7 @@ fn reifyStruct( if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) { const msg = msg: { const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = sema.mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field); @@ -19641,7 +19684,7 @@ fn reifyStruct( } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = sema.mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty); @@ -19660,7 +19703,7 @@ fn reifyStruct( sema.resolveTypeLayout(field.ty) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; - try sema.addFieldErrNote(struct_ty, index, msg, "while checking this field", .{}); + try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{}); return err; }, else => return err, @@ -20558,21 +20601,21 @@ fn bitOffsetOf(sema: *Sema, block: *Block, 
inst: Zir.Inst.Index) CompileError!u6 }, } - const field_index = if (ty.isTuple()) blk: { + const field_index = if (ty.isTuple(mod)) blk: { if (mem.eql(u8, field_name, "len")) { return sema.fail(block, src, "no offset available for 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src); } else try sema.structFieldIndex(block, ty, field_name, rhs_src); - if (ty.structFieldIsComptime(field_index)) { + if (ty.structFieldIsComptime(field_index, mod)) { return sema.fail(block, src, "no offset available for comptime field", .{}); } - switch (ty.containerLayout()) { + switch (ty.containerLayout(mod)) { .Packed => { var bit_sum: u64 = 0; - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values(), 0..) |field, i| { if (i == field_index) { return bit_sum; @@ -21810,6 +21853,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const modifier_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const func_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -21869,11 +21913,11 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); - if (!args_ty.isTuple() and args_ty.ip_index != .empty_struct_type) { + if (!args_ty.isTuple(mod) and args_ty.ip_index != .empty_struct_type) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)}); } - var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount()); + var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod)); for (resolved_args, 0..) 
|*resolved, i| { resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty); } @@ -21905,7 +21949,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const field_index = switch (parent_ty.zigTypeTag(mod)) { .Struct => blk: { - if (parent_ty.isTuple()) { + if (parent_ty.isTuple(mod)) { if (mem.eql(u8, field_name, "len")) { return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); } @@ -21918,7 +21962,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr else => unreachable, }; - if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index)) { + if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) { return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{}); } @@ -21926,17 +21970,17 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod); var ptr_ty_data: Type.Payload.Pointer.Data = .{ - .pointee_type = parent_ty.structFieldType(field_index), + .pointee_type = parent_ty.structFieldType(field_index, mod), .mutable = field_ptr_ty_info.mutable, .@"addrspace" = field_ptr_ty_info.@"addrspace", }; - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{}); } else { ptr_ty_data.@"align" = blk: { - if (parent_ty.castTag(.@"struct")) |struct_obj| { - break :blk struct_obj.data.fields.values()[field_index].abi_align; + if (mod.typeToStruct(parent_ty)) |struct_obj| { + break :blk struct_obj.fields.values()[field_index].abi_align; } else if (parent_ty.cast(Type.Payload.Union)) |union_obj| { break :blk union_obj.data.fields.values()[field_index].abi_align; } else { @@ -23380,8 +23424,7 @@ fn explainWhyTypeIsComptimeInner( .Struct => { if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return; - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(ty)) |struct_obj| { for (struct_obj.fields.values(), 0..) |field, i| { const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{ .index = i, @@ -23472,7 +23515,7 @@ fn validateExternType( .Enum => { return sema.validateExternType(try ty.intTagType(mod), position); }, - .Struct, .Union => switch (ty.containerLayout()) { + .Struct, .Union => switch (ty.containerLayout(mod)) { .Extern => return true, .Packed => { const bit_size = try ty.bitSizeAdvanced(mod, sema); @@ -23569,7 +23612,7 @@ fn explainWhyTypeIsNotExtern( /// Returns true if `ty` is allowed in packed types. /// Does *NOT* require `ty` to be resolved in any way. 
-fn validatePackedType(ty: Type, mod: *const Module) bool { +fn validatePackedType(ty: Type, mod: *Module) bool { switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, @@ -23595,7 +23638,7 @@ fn validatePackedType(ty: Type, mod: *const Module) bool { .Enum, => return true, .Pointer => return !ty.isSlice(mod), - .Struct, .Union => return ty.containerLayout() == .Packed, + .Struct, .Union => return ty.containerLayout(mod) == .Packed, } } @@ -24419,27 +24462,27 @@ fn fieldCallBind( switch (concrete_ty.zigTypeTag(mod)) { .Struct => { const struct_ty = try sema.resolveTypeFields(concrete_ty); - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const field_index_usize = struct_obj.data.fields.getIndex(field_name) orelse + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const field_index_usize = struct_obj.fields.getIndex(field_name) orelse break :find_field; const field_index = @intCast(u32, field_index_usize); - const field = struct_obj.data.fields.values()[field_index]; + const field = struct_obj.fields.values()[field_index]; return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); - } else if (struct_ty.isTuple()) { + } else if (struct_ty.isTuple(mod)) { if (mem.eql(u8, field_name, "len")) { - return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()) }; + return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) }; } if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { - if (field_index >= struct_ty.structFieldCount()) break :find_field; - return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index), field_index, object_ptr); + if (field_index >= struct_ty.structFieldCount(mod)) break :find_field; + return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr); } else |_| {} } else { - const max = struct_ty.structFieldCount(); + const max = struct_ty.structFieldCount(mod); var i: u32 = 0; while (i < max) : (i += 1) { - if (mem.eql(u8, struct_ty.structFieldName(i), field_name)) { - return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i), i, object_ptr); + if (mem.eql(u8, struct_ty.structFieldName(i, mod), field_name)) { + return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr); } } } @@ -24651,9 +24694,9 @@ fn structFieldPtr( const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); try sema.resolveStructLayout(struct_ty); - if (struct_ty.isTuple()) { + if (struct_ty.isTuple(mod)) { if (mem.eql(u8, field_name, "len")) { - const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()); + const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); @@ -24663,7 +24706,7 @@ fn structFieldPtr( return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_big = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); @@ -24687,7 +24730,7 @@ fn structFieldPtrByIndex( } const mod = sema.mod; - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field 
= struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod); @@ -24799,8 +24842,11 @@ fn structFieldVal( const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); }, - .@"struct" => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); const field_index_usize = struct_obj.fields.getIndex(field_name) orelse @@ -24827,7 +24873,6 @@ fn structFieldVal( }, else => unreachable, }, - else => unreachable, } } @@ -24840,8 +24885,9 @@ fn tupleFieldVal( field_name_src: LazySrcLoc, tuple_ty: Type, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (mem.eql(u8, field_name, "len")) { - return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount()); + return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount(mod)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty); @@ -24858,7 +24904,7 @@ fn tupleFieldIndex( const mod = sema.mod; assert(!std.mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { - if (field_index < tuple_ty.structFieldCount()) return field_index; + if (field_index < tuple_ty.structFieldCount(mod)) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ field_name, tuple_ty.fmt(mod), }); @@ -24878,7 +24924,7 @@ fn tupleFieldValByIndex( tuple_ty: Type, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - const field_ty = tuple_ty.structFieldType(field_index); + const field_ty = tuple_ty.structFieldType(field_index, mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); @@ -25251,7 +25297,7 @@ fn tupleFieldPtr( const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(mod); _ = try sema.resolveTypeFields(tuple_ty); - const field_count = tuple_ty.structFieldCount(); + const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{}); @@ -25263,7 +25309,7 @@ fn tupleFieldPtr( }); } - const field_ty = tuple_ty.structFieldType(field_index); + const field_ty = tuple_ty.structFieldType(field_index, mod); const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, .mutable = tuple_ptr_ty.ptrIsMutable(mod), @@ -25308,7 +25354,7 @@ fn tupleField( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple)); - const field_count = tuple_ty.structFieldCount(); + const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{}); @@ -25320,7 +25366,7 @@ fn tupleField( }); } - const field_ty = tuple_ty.structFieldType(field_index); + const field_ty = tuple_ty.structFieldType(field_index, mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return 
sema.addConstant(field_ty, default_value); // comptime field @@ -25919,7 +25965,7 @@ fn coerceExtra( .Array => { // pointer to tuple to pointer to array if (inst_ty.isSinglePointer(mod) and - inst_ty.childType(mod).isTuple() and + inst_ty.childType(mod).isTuple(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src); @@ -25939,11 +25985,11 @@ fn coerceExtra( if (!inst_ty.isSinglePointer(mod)) break :to_slice; const inst_child_ty = inst_ty.childType(mod); - if (!inst_child_ty.isTuple()) break :to_slice; + if (!inst_child_ty.isTuple(mod)) break :to_slice; // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. - if (inst_child_ty.structFieldCount() == 0) { + if (inst_child_ty.structFieldCount(mod) == 0) { // Optional slice is represented with a null pointer so // we use a dummy pointer value with the required alignment. const slice_val = try Value.Tag.slice.create(sema.arena, .{ @@ -26213,7 +26259,7 @@ fn coerceExtra( if (inst == .empty_struct) { return sema.arrayInitEmpty(block, inst_src, dest_ty); } - if (inst_ty.isTuple()) { + if (inst_ty.isTuple(mod)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -26225,7 +26271,7 @@ fn coerceExtra( .Vector => switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { - if (inst_ty.isTuple()) { + if (inst_ty.isTuple(mod)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -26238,7 +26284,7 @@ fn coerceExtra( if (inst == .empty_struct) { return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src); } - if (inst_ty.isTupleOrAnonStruct()) { + if (inst_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) { error.NotCoercible => break :blk, else => |e| return e, @@ -27304,8 +27350,8 @@ fn storePtr2( // this code does not handle tuple-to-struct coercion which requires dealing with missing // fields. 
const operand_ty = sema.typeOf(uncasted_operand); - if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) { - const field_count = operand_ty.structFieldCount(); + if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) { + const field_count = operand_ty.structFieldCount(mod); var i: u32 = 0; while (i < field_count) : (i += 1) { const elem_src = operand_src; // TODO better source location @@ -27804,7 +27850,7 @@ fn beginComptimePtrMutation( switch (parent.ty.zigTypeTag(mod)) { .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount()); + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); @memset(fields, Value.undef); val_ptr.* = try Value.Tag.aggregate.create(arena, fields); @@ -27813,7 +27859,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), &fields[field_index], ptr_elem_ty, parent.decl_ref_mut, @@ -27832,7 +27878,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), &payload.data.val, ptr_elem_ty, parent.decl_ref_mut, @@ -27878,7 +27924,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), duped, ptr_elem_ty, parent.decl_ref_mut, @@ -27889,7 +27935,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), &val_ptr.castTag(.aggregate).?.data[field_index], ptr_elem_ty, parent.decl_ref_mut, @@ -27907,7 +27953,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), &payload.val, ptr_elem_ty, parent.decl_ref_mut, @@ -28269,8 +28315,8 @@ fn beginComptimePtrLoad( var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { - const struct_ty = field_ptr.container_ty.castTag(.@"struct"); - if (struct_ty != null and struct_ty.?.data.layout == .Packed) { + const struct_obj = mod.typeToStruct(field_ptr.container_ty); + if (struct_obj != null and struct_obj.?.layout == .Packed) { // packed structs are not byte addressable deref.parent = null; } else if (deref.parent) |*parent| { @@ -28310,7 +28356,7 @@ fn beginComptimePtrLoad( else => unreachable, }; } else { - const field_ty = field_ptr.container_ty.structFieldType(field_index); + const field_ty = field_ptr.container_ty.structFieldType(field_index, mod); deref.pointee = TypedValue{ .ty = field_ty, .val = try tv.val.fieldValue(tv.ty, mod, field_index), @@ -28483,7 +28529,7 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul const inst_info = inst_ty.ptrInfo(mod); const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or - (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0); + (inst_info.pointee_type.isTuple(mod) and inst_info.pointee_type.structFieldCount(mod) == 0); const ok_cv_qualifiers = ((inst_info.mutable or !dest_info.mutable) or len0) and @@ -28714,8 +28760,9 @@ fn coerceAnonStructToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = 
sema.typeOf(inst); - const field_count = inst_ty.structFieldCount(); + const field_count = inst_ty.structFieldCount(mod); if (field_count != 1) { const msg = msg: { const msg = if (field_count > 1) try sema.errMsg( @@ -28927,7 +28974,7 @@ fn coerceTupleToSlicePtrs( const tuple_ty = sema.typeOf(ptr_tuple).childType(mod); const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); const slice_info = slice_ty.ptrInfo(mod); - const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod); + const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(mod), slice_info.sentinel, slice_info.pointee_type, sema.mod); const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src); if (slice_info.@"align" != 0) { return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{}); @@ -28966,20 +29013,21 @@ fn coerceTupleToStruct( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const struct_ty = try sema.resolveTypeFields(dest_ty); - if (struct_ty.isTupleOrAnonStruct()) { + if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); } - const fields = struct_ty.structFields(); + const fields = struct_ty.structFields(mod); const field_vals = try sema.arena.alloc(Value, fields.count()); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); var runtime_src: ?LazySrcLoc = null; - const field_count = inst_ty.structFieldCount(); + const field_count = inst_ty.structFieldCount(mod); var field_i: u32 = 0; while (field_i < field_count) : (field_i += 1) { const field_src = inst_src; // TODO better source location @@ -29061,13 +29109,14 @@ fn coerceTupleToTuple( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const dest_field_count = tuple_ty.structFieldCount(); + const mod = sema.mod; + const dest_field_count = tuple_ty.structFieldCount(mod); const field_vals = try sema.arena.alloc(Value, dest_field_count); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const inst_field_count = inst_ty.structFieldCount(); + const inst_field_count = inst_ty.structFieldCount(mod); if (inst_field_count > dest_field_count) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; @@ -29085,8 +29134,8 @@ fn coerceTupleToTuple( const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); - const field_ty = tuple_ty.structFieldType(field_i); - const default_val = tuple_ty.structFieldDefaultValue(field_i); + const field_ty = tuple_ty.structFieldType(field_i, mod); + const default_val = tuple_ty.structFieldDefaultValue(field_i, mod); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); field_refs[field_index] = coerced; @@ -29115,12 +29164,12 @@ fn coerceTupleToTuple( for (field_refs, 0..) 
|*field_ref, i| { if (field_ref.* != .none) continue; - const default_val = tuple_ty.structFieldDefaultValue(i); - const field_ty = tuple_ty.structFieldType(i); + const default_val = tuple_ty.structFieldDefaultValue(i, mod); + const field_ty = tuple_ty.structFieldType(i, mod); const field_src = inst_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { - if (tuple_ty.isTuple()) { + if (tuple_ty.isTuple(mod)) { const template = "missing tuple field: {d}"; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, .{i}); @@ -29130,7 +29179,7 @@ fn coerceTupleToTuple( continue; } const template = "missing struct field: {s}"; - const args = .{tuple_ty.structFieldName(i)}; + const args = .{tuple_ty.structFieldName(i, mod)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -31222,17 +31271,17 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { } fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - if (resolved_ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(resolved_ty)) |struct_obj| { switch (struct_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { const msg = try Module.ErrorMsg.create( sema.gpa, - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); return sema.failWithOwnedErrorMsg(msg); }, @@ -31256,7 +31305,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { } if (struct_obj.layout == .Packed) { - try semaBackingIntType(sema.mod, struct_obj); + try semaBackingIntType(mod, struct_obj); } struct_obj.status = .have_layout; @@ -31265,20 +31314,20 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), "struct layout depends on it having runtime bits", .{}, ); return sema.failWithOwnedErrorMsg(msg); } - if (struct_obj.layout == .Auto and sema.mod.backendSupportsFeature(.field_reordering)) { + if (struct_obj.layout == .Auto and mod.backendSupportsFeature(.field_reordering)) { const optimized_order = if (struct_obj.owner_decl == sema.owner_decl_index) try sema.perm_arena.alloc(u32, struct_obj.fields.count()) else blk: { - const decl = sema.mod.declPtr(struct_obj.owner_decl); + const decl = mod.declPtr(struct_obj.owner_decl); var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(sema.mod.gpa, &decl_arena); + const decl_arena_allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); }; @@ -31528,7 +31577,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return switch (ty.ip_index) { .empty_struct_type => false, .none => switch (ty.tag()) { - .empty_struct, .error_set, .error_set_single, .error_set_inferred, @@ -31569,27 +31617,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return false; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var 
requires_comptime = false; - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - struct_obj.requires_comptime = .yes; - } else { - struct_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } - }, - .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; switch (union_obj.requires_comptime) { @@ -31686,7 +31713,27 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .type_info, => true, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch (struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + struct_obj.requires_comptime = .yes; + } else { + struct_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, + .union_type => @panic("TODO"), .opaque_type => false, @@ -31697,6 +31744,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -31710,16 +31758,21 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, - .Struct => switch (ty.tag()) { - .@"struct" => return sema.resolveStructFully(ty), - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); + .Struct => switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); - for (tuple.types) |field_ty| { - try sema.resolveTypeFully(field_ty); - } + for (tuple.types) |field_ty| { + try sema.resolveTypeFully(field_ty); + } + }, + else => {}, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => return sema.resolveStructFully(ty), + else => {}, }, - else => {}, }, .Union => return sema.resolveUnionFully(ty), .Array => return sema.resolveTypeFully(ty.childType(mod)), @@ -31746,9 +31799,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveStructLayout(ty); + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const payload = resolved_ty.castTag(.@"struct").?; - const struct_obj = payload.data; + const struct_obj = mod.typeToStruct(resolved_ty).?; switch (struct_obj.status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, @@ -31806,11 +31859,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { switch (ty.ip_index) { .none => switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - try sema.resolveTypeFieldsStruct(ty, struct_obj); - return ty; - }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; try sema.resolveTypeFieldsUnion(ty, union_obj); @@ -31904,7 +31952,11 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"), _ => switch 
(mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty; + try sema.resolveTypeFieldsStruct(ty, struct_obj); + return ty; + }, .union_type => @panic("TODO"), else => return ty, }, @@ -33010,28 +33062,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } }, - .@"struct" => { - const resolved_ty = try sema.resolveTypeFields(ty); - const s = resolved_ty.castTag(.@"struct").?.data; - for (s.fields.values(), 0..) |field, i| { - if (field.is_comptime) continue; - if (field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - s.srcLoc(sema.mod), - "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { - return null; - } - } - return Value.empty_struct; - }, - .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.values, 0..) |val, i| { @@ -33120,8 +33150,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }); }, - .empty_struct => return Value.empty_struct, - .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); @@ -33212,7 +33240,34 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .generic_poison => return error.GenericPoison, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const resolved_ty = try sema.resolveTypeFields(ty); + if (mod.structPtrUnwrap(struct_type.index)) |s| { + for (s.fields.values(), 0..) |field, i| { + if (field.is_comptime) continue; + if (field.ty.eql(resolved_ty, sema.mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + s.srcLoc(sema.mod), + "struct '{}' depends on itself", + .{ty.fmt(sema.mod)}, + ); + try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(msg); + } + if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { + return null; + } + } + } + // In this case the struct has no fields and therefore has one possible value. 
+ const empty = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .fields = &.{}, + } }); + return empty.toValue(); + }, + .union_type => @panic("TODO"), .opaque_type => null, @@ -33223,6 +33278,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -33614,7 +33670,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .empty_struct_type => false, .none => switch (ty.tag()) { - .empty_struct, .error_set, .error_set_single, .error_set_inferred, @@ -33655,31 +33710,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return false; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - if (struct_obj.status == .field_types_wip) - return false; - - try sema.resolveTypeFieldsStruct(ty, struct_obj); - - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try sema.typeRequiresComptime(field.ty)) { - struct_obj.requires_comptime = .yes; - return true; - } - } - struct_obj.requires_comptime = .no; - return false; - }, - } - }, - .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; switch (union_obj.requires_comptime) { @@ -33782,7 +33812,31 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch (struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (struct_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsStruct(ty, struct_obj); + + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (try sema.typeRequiresComptime(field.ty)) { + struct_obj.requires_comptime = .yes; + return true; + } + } + struct_obj.requires_comptime = .no; + return false; + }, + } + }, + .union_type => @panic("TODO"), .opaque_type => false, @@ -33793,6 +33847,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -33864,11 +33919,12 @@ fn structFieldIndex( field_name: []const u8, field_src: LazySrcLoc, ) !u32 { + const mod = sema.mod; const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); if (struct_ty.isAnonStruct()) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_usize = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); return @intCast(u32, field_index_usize); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index fae637cf2484..2105d3108f3c 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -180,7 +180,7 @@ pub fn print( switch (field_ptr.container_ty.tag()) { .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), else => { - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index); + const field_name = 
field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); return writer.print(".{s}", .{field_name}); }, } @@ -381,21 +381,27 @@ fn printAggregate( } if (ty.zigTypeTag(mod) == .Struct) { try writer.writeAll(".{"); - const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); + const max_len = std.math.min(ty.structFieldCount(mod), max_aggregate_items); var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - switch (ty.tag()) { - .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), - else => {}, + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .anon_struct => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), + else => {}, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), + else => {}, + }, } try print(.{ - .ty = ty.structFieldType(i), + .ty = ty.structFieldType(i, mod), .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } - if (ty.structFieldCount() > max_aggregate_items) { + if (ty.structFieldCount(mod) > max_aggregate_items) { try writer.writeAll(", ..."); } return writer.writeAll("}"); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 970d59a25f53..3e893411fccb 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4119,7 +4119,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_ty = struct_ty.structFieldType(index); + const struct_field_ty = struct_ty.structFieldType(index, mod); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -5466,10 +5466,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1); + const overflow_bit_ty = ty.structFieldType(1, mod); const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const raw_cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty); diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index 1d042b632a67..6589425fc229 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -21,7 +21,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class { var maybe_float_bits: ?u16 = null; switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.containerLayout() == .Packed) return .byval; + if (ty.containerLayout(mod) == .Packed) return .byval; const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= sret_float_count) return .{ .float_array = float_count }; @@ -31,7 +31,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class { return .integer; }, .Union => { - if (ty.containerLayout() == .Packed) return .byval; + if (ty.containerLayout(mod) == .Packed) return .byval; const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= sret_float_count) return .{ .float_array = float_count }; @@ -90,11 +90,11 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: 
*?u16) u8 { return max_count; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = ty.structFieldCount(mod); var count: u8 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; @@ -125,10 +125,10 @@ pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type { return null; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = ty.structFieldCount(mod); var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); if (getFloatArrayType(field_ty, mod)) |some| return some; } return null; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 50f6d76c55a7..5cc165fdfe73 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2910,7 +2910,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); - const struct_field_ty = struct_ty.structFieldType(index); + const struct_field_ty = struct_ty.structFieldType(index, mod); switch (mcv) { .dead, .unreach => unreachable, @@ -5404,10 +5404,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); - const overflow_bit_ty = ty.structFieldType(1); + const overflow_bit_ty = ty.structFieldType(1, mod); const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 79ffadf831e6..7a7d632837df 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -32,7 +32,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { switch (ty.zigTypeTag(mod)) { .Struct => { const bit_size = ty.bitSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (bit_size > 64) return .memory; return .byval; } @@ -40,10 +40,10 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; - const fields = ty.structFieldCount(); + const fields = ty.structFieldCount(mod); var i: u32 = 0; while (i < fields) : (i += 1) { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); const field_alignment = ty.structFieldAlign(i, mod); const field_size = field_ty.bitSize(mod); if (field_size > 32 or field_alignment > 32) { @@ -54,7 +54,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { }, .Union => { const bit_size = ty.bitSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (bit_size > 64) return .memory; return .byval; } @@ -132,11 +132,11 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 { return max_count; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = 
ty.structFieldCount(mod); var count: u32 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 28a69d913611..41a18506356c 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -15,7 +15,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class { switch (ty.zigTypeTag(mod)) { .Struct => { const bit_size = ty.bitSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; } @@ -26,7 +26,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class { }, .Union => { const bit_size = ty.bitSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 0490db615b90..0677b72f1a96 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3993,10 +3993,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1); + const overflow_bit_ty = ty.structFieldType(1, mod); const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b484e21424ad..90c26d5d84be 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1006,9 +1006,9 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64; break :blk wasm.Valtype.i32; // represented as pointer to stack }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; return typeToValtype(struct_obj.backing_int_ty, mod); }, else => wasm.Valtype.i32, @@ -1017,7 +1017,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { .direct => wasm.Valtype.v128, .unrolled => wasm.Valtype.i32, }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Packed => { const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory"); return typeToValtype(int_ty, mod); @@ -1747,8 +1747,7 @@ fn isByRef(ty: Type, mod: *Module) bool { return ty.hasRuntimeBitsIgnoreComptime(mod); }, .Struct => { - if (ty.castTag(.@"struct")) |struct_ty| { - const struct_obj = struct_ty.data; + if (mod.typeToStruct(ty)) |struct_obj| { if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { return isByRef(struct_obj.backing_int_ty, mod); } @@ -2954,11 +2953,11 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue const parent_ty = field_ptr.container_ty; const field_offset = switch (parent_ty.zigTypeTag(mod)) { - .Struct => switch (parent_ty.containerLayout()) 
{ + .Struct => switch (parent_ty.containerLayout(mod)) { .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod), else => parent_ty.structFieldOffset(field_ptr.field_index, mod), }, - .Union => switch (parent_ty.containerLayout()) { + .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, else => blk: { const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod); @@ -3158,7 +3157,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { return WValue{ .imm32 = @boolToInt(is_pl) }; }, .Struct => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; assert(struct_obj.layout == .Packed); var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; @@ -3225,7 +3224,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { return WValue{ .imm32 = 0xaaaaaaaa }; }, .Struct => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; assert(struct_obj.layout == .Packed); return func.emitUndefined(struct_obj.backing_int_ty); }, @@ -3635,7 +3634,7 @@ fn structFieldPtr( ) InnerError!WValue { const mod = func.bin_file.base.options.module.?; const result_ty = func.typeOfIndex(inst); - const offset = switch (struct_ty.containerLayout()) { + const offset = switch (struct_ty.containerLayout(mod)) { .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { if (result_ty.ptrInfo(mod).host_size != 0) { @@ -3668,13 +3667,13 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const struct_ty = func.typeOf(struct_field.struct_operand); const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); + const field_ty = struct_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); - const result = switch (struct_ty.containerLayout()) { + const result = switch (struct_ty.containerLayout(mod)) { .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => result: { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const offset = struct_obj.packedFieldBitOffset(mod, field_index); const backing_ty = struct_obj.backing_int_ty; const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse { @@ -4998,12 +4997,12 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } break :result_value result; }, - .Struct => switch (result_ty.containerLayout()) { + .Struct => switch (result_ty.containerLayout(mod)) { .Packed => { if (isByRef(result_ty, mod)) { return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{}); } - const struct_obj = result_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(result_ty).?; const fields = struct_obj.fields.values(); const backing_type = struct_obj.backing_int_ty; @@ -5051,7 +5050,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { for (elements, 0..) 
|elem, elem_index| { if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_index); + const elem_ty = result_ty.structFieldType(elem_index, mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const value = try func.resolveInst(elem); try func.store(offset, value, elem_ty, 0); diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index bb5911382b7e..ee836bebdb48 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -26,14 +26,14 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } // When the struct type is non-scalar - if (ty.structFieldCount() > 1) return memory; + if (ty.structFieldCount(mod) > 1) return memory; // When the struct's alignment is non-natural - const field = ty.structFields().values()[0]; + const field = ty.structFields(mod).values()[0]; if (field.abi_align != 0) { if (field.abi_align > field.ty.abiAlignment(mod)) { return memory; @@ -64,7 +64,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { return direct; }, .Union => { - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } @@ -96,19 +96,19 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { pub fn scalarType(ty: Type, mod: *Module) Type { switch (ty.zigTypeTag(mod)) { .Struct => { - switch (ty.containerLayout()) { + switch (ty.containerLayout(mod)) { .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; return scalarType(struct_obj.backing_int_ty, mod); }, else => { - std.debug.assert(ty.structFieldCount() == 1); - return scalarType(ty.structFieldType(0), mod); + std.debug.assert(ty.structFieldCount(mod) == 1); + return scalarType(ty.structFieldType(0, mod), mod); }, } }, .Union => { - if (ty.containerLayout() != .Packed) { + if (ty.containerLayout(mod) != .Packed) { const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0 and layout.tag_size != 0) { return scalarType(ty.unionTagTypeSafety().?, mod); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4fb5267cb023..77661b2a14ca 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3252,13 +3252,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(1, mod)), - tuple_ty.structFieldType(1), + tuple_ty.structFieldType(1, mod), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(0, mod)), - tuple_ty.structFieldType(0), + tuple_ty.structFieldType(0, mod), partial_mcv, ); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3289,7 +3289,7 @@ fn genSetFrameTruncatedOverflowCompare( }; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const ty = tuple_ty.structFieldType(0); + const ty = tuple_ty.structFieldType(0, mod); const int_info = ty.intInfo(mod); const hi_limb_bits = (int_info.bits - 1) % 64 + 1; @@ -3336,7 +3336,7 @@ fn genSetFrameTruncatedOverflowCompare( try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(1, mod)), - tuple_ty.structFieldType(1), + 
tuple_ty.structFieldType(1, mod), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, ); } @@ -3393,13 +3393,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(0, mod)), - tuple_ty.structFieldType(0), + tuple_ty.structFieldType(0, mod), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(1, mod)), - tuple_ty.structFieldType(1), + tuple_ty.structFieldType(1, mod), .{ .immediate = 0 }, // cc being set is impossible ); } else try self.genSetFrameTruncatedOverflowCompare( @@ -5563,7 +5563,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); const container_ty = ptr_container_ty.childType(mod); - const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { + const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), .Packed => if (container_ty.zigTypeTag(mod) == .Struct and ptr_field_ty.ptrInfo(mod).host_size == 0) @@ -5591,16 +5591,16 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const container_ty = self.typeOf(operand); const container_rc = regClassForType(container_ty, mod); - const field_ty = container_ty.structFieldType(index); + const field_ty = container_ty.structFieldType(index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; const field_rc = regClassForType(field_ty, mod); const field_is_gp = field_rc.supersetOf(gp); const src_mcv = try self.resolveInst(operand); - const field_off = switch (container_ty.containerLayout()) { + const field_off = switch (container_ty.containerLayout(mod)) { .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8), - .Packed => if (container_ty.castTag(.@"struct")) |struct_obj| - struct_obj.data.packedFieldBitOffset(mod, index) + .Packed => if (mod.typeToStruct(container_ty)) |struct_obj| + struct_obj.packedFieldBitOffset(mod, index) else 0, }; @@ -10036,13 +10036,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal try self.genSetMem( base, disp + @intCast(i32, ty.structFieldOffset(0, mod)), - ty.structFieldType(0), + ty.structFieldType(0, mod), .{ .register = ro.reg }, ); try self.genSetMem( base, disp + @intCast(i32, ty.structFieldOffset(1, mod)), - ty.structFieldType(1), + ty.structFieldType(1, mod), .{ .eflags = ro.eflags }, ); }, @@ -11259,8 +11259,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .Struct => { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); - if (result_ty.containerLayout() == .Packed) { - const struct_obj = result_ty.castTag(.@"struct").?.data; + if (result_ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(result_ty).?; try self.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, @@ -11269,7 +11269,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { for (elements, 0..) 
|elem, elem_i| { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i); + const elem_ty = result_ty.structFieldType(elem_i, mod); const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); if (elem_bit_size > 64) { return self.fail( @@ -11341,7 +11341,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } else for (elements, 0..) |elem, elem_i| { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i); + const elem_ty = result_ty.structFieldType(elem_i, mod); const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index 1bae899d33ce..45ce64a98e7b 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -41,7 +41,7 @@ pub fn classifyWindows(ty: Type, mod: *Module) Class { 1, 2, 4, 8 => return .integer, else => switch (ty.zigTypeTag(mod)) { .Int => return .win_i128, - .Struct, .Union => if (ty.containerLayout() == .Packed) { + .Struct, .Union => if (ty.containerLayout(mod) == .Packed) { return .win_i128; } else { return .memory; @@ -210,7 +210,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". const ty_size = ty.abiSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { assert(ty_size <= 128); result[0] = .integer; if (ty_size > 64) result[1] = .integer; @@ -221,7 +221,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { var result_i: usize = 0; // out of 8 var byte_i: usize = 0; // out of 8 - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values()) |field| { if (field.abi_align != 0) { if (field.abi_align < field.ty.abiAlignment(mod)) { @@ -329,7 +329,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". const ty_size = ty.abiSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { assert(ty_size <= 128); result[0] = .integer; if (ty_size > 64) result[1] = .integer; diff --git a/src/codegen.zig b/src/codegen.zig index 70df1fc17ba8..b29af1ff931a 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -503,8 +503,8 @@ pub fn generateSymbol( return Result.ok; }, .Struct => { - if (typed_value.ty.containerLayout() == .Packed) { - const struct_obj = typed_value.ty.castTag(.@"struct").?.data; + if (typed_value.ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(typed_value.ty).?; const fields = struct_obj.fields.values(); const field_vals = typed_value.val.castTag(.aggregate).?.data; const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; @@ -539,7 +539,7 @@ pub fn generateSymbol( const struct_begin = code.items.len; const field_vals = typed_value.val.castTag(.aggregate).?.data; for (field_vals, 0..) 
|field_val, index| { - const field_ty = typed_value.ty.structFieldType(index); + const field_ty = typed_value.ty.structFieldType(index, mod); if (!field_ty.hasRuntimeBits(mod)) continue; switch (try generateSymbol(bin_file, src_loc, .{ diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 36af222c7ed8..1c1621650409 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -820,7 +820,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, Type.bool, val, initializer_type); return writer.writeAll(" }"); }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto, .Extern => { if (!location.isInitializer()) { try writer.writeByte('('); @@ -830,9 +830,9 @@ pub const DeclGen = struct { try writer.writeByte('{'); var empty = true; - for (0..ty.structFieldCount()) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + for (0..ty.structFieldCount(mod)) |field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBits(mod)) continue; if (!empty) try writer.writeByte(','); @@ -1328,7 +1328,7 @@ pub const DeclGen = struct { }, else => unreachable, }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto, .Extern => { const field_vals = val.castTag(.aggregate).?.data; @@ -1341,8 +1341,8 @@ pub const DeclGen = struct { try writer.writeByte('{'); var empty = true; for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeByte(','); @@ -1363,8 +1363,8 @@ pub const DeclGen = struct { var eff_num_fields: usize = 0; for (0..field_vals.len) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; eff_num_fields += 1; @@ -1386,8 +1386,8 @@ pub const DeclGen = struct { var eff_index: usize = 0; var needs_closing_paren = false; for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; @@ -1416,8 +1416,8 @@ pub const DeclGen = struct { // a << a_off | b << b_off | c << c_off var empty = true; for (field_vals, 0..) 
|field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(" | "); @@ -1453,7 +1453,7 @@ pub const DeclGen = struct { const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?; const field_ty = ty.unionFields().values()[field_i].ty; const field_name = ty.unionFields().keys()[field_i]; - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (field_ty.hasRuntimeBits(mod)) { if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); @@ -5218,25 +5218,25 @@ fn fieldLocation( end: void, } { return switch (container_ty.zigTypeTag(mod)) { - .Struct => switch (container_ty.containerLayout()) { - .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| { - if (container_ty.structFieldIsComptime(next_field_index)) continue; - const field_ty = container_ty.structFieldType(next_field_index); + .Struct => switch (container_ty.containerLayout(mod)) { + .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| { + if (container_ty.structFieldIsComptime(next_field_index, mod)) continue; + const field_ty = container_ty.structFieldType(next_field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; break .{ .field = if (container_ty.isSimpleTuple()) .{ .field = next_field_index } else - .{ .identifier = container_ty.structFieldName(next_field_index) } }; + .{ .identifier = container_ty.structFieldName(next_field_index, mod) } }; } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin, .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0) .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } else .begin, }, - .Union => switch (container_ty.containerLayout()) { + .Union => switch (container_ty.containerLayout(mod)) { .Auto, .Extern => { - const field_ty = container_ty.structFieldType(field_index); + const field_ty = container_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return if (container_ty.unionTagTypeSafety() != null and !container_ty.unionHasAllZeroBitFieldTypes(mod)) @@ -5417,101 +5417,111 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { // Ensure complete type definition is visible before accessing fields. 
_ = try f.typeToIndex(struct_ty, .complete); - const field_name: CValue = switch (struct_ty.tag()) { - .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { - .Auto, .Extern => if (struct_ty.isSimpleTuple()) + const field_name: CValue = switch (struct_ty.ip_index) { + .none => switch (struct_ty.tag()) { + .tuple, .anon_struct => if (struct_ty.isSimpleTuple()) .{ .field = extra.field_index } else - .{ .identifier = struct_ty.structFieldName(extra.field_index) }, - .Packed => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - const int_info = struct_ty.intInfo(mod); - - const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - - const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); - const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, - const field_int_signedness = if (inst_ty.isAbiInt(mod)) - inst_ty.intInfo(mod).signedness - else - .unsigned; - const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); - - const temp_local = try f.allocLocal(inst, field_int_ty); - try f.writeCValue(writer, temp_local, .Other); - try writer.writeAll(" = zig_wrap_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); - try writer.writeAll("(("); - try f.renderType(writer, field_int_ty); - try writer.writeByte(')'); - const cant_cast = int_info.bits > 64; - if (cant_cast) { - if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); - try writer.writeAll("zig_lo_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); - } - if (bit_offset > 0) { - try writer.writeAll("zig_shr_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); - } - try f.writeCValue(writer, struct_byval, .Other); - if (bit_offset > 0) { - try writer.writeAll(", "); - try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - try writer.writeByte(')'); - } - if (cant_cast) try writer.writeByte(')'); - try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); - try writer.writeAll(");\n"); - if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local; + .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout(mod) == .Packed) { + const operand_lval = if (struct_byval == .constant) blk: { + const operand_local = try f.allocLocal(inst, struct_ty); + try f.writeCValue(writer, operand_local, .Other); + try writer.writeAll(" = "); + try f.writeCValue(writer, struct_byval, .Initializer); + try writer.writeAll(";\n"); + break :blk operand_local; + } else struct_byval; const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy("); - try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); - try writer.writeAll(", "); - try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); + try writer.writeAll("memcpy(&"); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(", &"); + try f.writeCValue(writer, operand_lval, .Other); try writer.writeAll(", sizeof("); try f.renderType(writer, inst_ty); try writer.writeAll("));\n"); - try freeLocal(f, inst, temp_local.new_local, 0); + + if (struct_byval == .constant) { + try freeLocal(f, inst, operand_lval.new_local, 0); + } + return local; + } else field_name: { + const name = 
struct_ty.unionFields().keys()[extra.field_index]; + break :field_name if (struct_ty.unionTagTypeSafety()) |_| + .{ .payload_identifier = name } + else + .{ .identifier = name }; }, + else => unreachable, }, - .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) { - const operand_lval = if (struct_byval == .constant) blk: { - const operand_local = try f.allocLocal(inst, struct_ty); - try f.writeCValue(writer, operand_local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, struct_byval, .Initializer); - try writer.writeAll(";\n"); - break :blk operand_local; - } else struct_byval; + else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .struct_type => switch (struct_ty.containerLayout(mod)) { + .Auto, .Extern => if (struct_ty.isSimpleTuple()) + .{ .field = extra.field_index } + else + .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .Packed => { + const struct_obj = mod.typeToStruct(struct_ty).?; + const int_info = struct_ty.intInfo(mod); - const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy(&"); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(", &"); - try f.writeCValue(writer, operand_lval, .Other); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("));\n"); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - if (struct_byval == .constant) { - try freeLocal(f, inst, operand_lval.new_local, 0); - } + const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); - return local; - } else field_name: { - const name = struct_ty.unionFields().keys()[extra.field_index]; - break :field_name if (struct_ty.unionTagTypeSafety()) |_| - .{ .payload_identifier = name } - else - .{ .identifier = name }; + const field_int_signedness = if (inst_ty.isAbiInt(mod)) + inst_ty.intInfo(mod).signedness + else + .unsigned; + const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); + + const temp_local = try f.allocLocal(inst, field_int_ty); + try f.writeCValue(writer, temp_local, .Other); + try writer.writeAll(" = zig_wrap_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); + try writer.writeAll("(("); + try f.renderType(writer, field_int_ty); + try writer.writeByte(')'); + const cant_cast = int_info.bits > 64; + if (cant_cast) { + if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + try writer.writeAll("zig_lo_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + if (bit_offset > 0) { + try writer.writeAll("zig_shr_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + try f.writeCValue(writer, struct_byval, .Other); + if (bit_offset > 0) { + try writer.writeAll(", "); + try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } + if (cant_cast) try writer.writeByte(')'); + try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); + try writer.writeAll(");\n"); + if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local; + + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("memcpy("); + try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); + try 
writer.writeAll(", "); + try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); + try writer.writeAll(", sizeof("); + try f.renderType(writer, inst_ty); + try writer.writeAll("));\n"); + try freeLocal(f, inst, temp_local.new_local, 0); + return local; + }, + }, + else => unreachable, }, - else => unreachable, }; const local = try f.allocLocal(inst, inst_ty); @@ -6805,17 +6815,17 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try a.end(f, writer); } }, - .Struct => switch (inst_ty.containerLayout()) { + .Struct => switch (inst_ty.containerLayout(mod)) { .Auto, .Extern => for (resolved_elements, 0..) |element, field_i| { - if (inst_ty.structFieldIsComptime(field_i)) continue; - const field_ty = inst_ty.structFieldType(field_i); + if (inst_ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = inst_ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const a = try Assignment.start(f, writer, field_ty); try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple()) .{ .field = field_i } else - .{ .identifier = inst_ty.structFieldName(field_i) }); + .{ .identifier = inst_ty.structFieldName(field_i, mod) }); try a.assign(f, writer); try f.writeCValue(writer, element, .Other); try a.end(f, writer); @@ -6831,8 +6841,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { var empty = true; for (0..elements.len) |field_i| { - if (inst_ty.structFieldIsComptime(field_i)) continue; - const field_ty = inst_ty.structFieldType(field_i); + if (inst_ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = inst_ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) { @@ -6844,8 +6854,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } empty = true; for (resolved_elements, 0..) 
|element, field_i| { - if (inst_ty.structFieldIsComptime(field_i)) continue; - const field_ty = inst_ty.structFieldType(field_i); + if (inst_ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = inst_ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(", "); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 799f18e3e4c0..3321df6d4901 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -299,7 +299,7 @@ pub const CType = extern union { pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs { return init( struct_ty.structFieldAlign(field_i, mod), - struct_ty.structFieldType(field_i).abiAlignment(mod), + struct_ty.structFieldType(field_i, mod).abiAlignment(mod), ); } pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs { @@ -1486,23 +1486,23 @@ pub const CType = extern union { } }, - .Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) { - if (ty.castTag(.@"struct")) |struct_obj| { - try self.initType(struct_obj.data.backing_int_ty, kind, lookup); + .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) { + if (mod.typeToStruct(ty)) |struct_obj| { + try self.initType(struct_obj.backing_int_ty, kind, lookup); } else { const bits = @intCast(u16, ty.bitSize(mod)); const int_ty = try mod.intType(.unsigned, bits); try self.initType(int_ty, kind, lookup); } - } else if (ty.isTupleOrAnonStruct()) { + } else if (ty.isTupleOrAnonStruct(mod)) { if (lookup.isMutable()) { for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(field_ty, switch (kind) { .forward, .forward_parameter => .forward, @@ -1579,11 +1579,11 @@ pub const CType = extern union { } else { var is_packed = false; for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const field_align = AlignAs.fieldAlign(ty, field_i, mod); @@ -1929,15 +1929,15 @@ pub const CType = extern union { => { const zig_ty_tag = ty.zigTypeTag(mod); const fields_len = switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }; var c_fields_len: usize = 0; for (0..fields_len) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; c_fields_len += 1; } @@ -1945,8 +1945,8 @@ pub const CType = extern union { const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); var c_field_i: usize = 0; for (0..fields_len) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and 
ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; @@ -1955,7 +1955,7 @@ pub const CType = extern union { std.fmt.allocPrintZ(arena, "f{}", .{field_i}) else arena.dupeZ(u8, switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i), + .Struct => ty.structFieldName(field_i, mod), .Union => ty.unionFields().keys()[field_i], else => unreachable, }), @@ -2074,7 +2074,7 @@ pub const CType = extern union { .fwd_anon_struct, .fwd_anon_union, => { - if (!ty.isTupleOrAnonStruct()) return false; + if (!ty.isTupleOrAnonStruct(mod)) return false; var name_buf: [ std.fmt.count("f{}", .{std.math.maxInt(usize)}) @@ -2084,12 +2084,12 @@ pub const CType = extern union { const zig_ty_tag = ty.zigTypeTag(mod); var c_field_i: usize = 0; for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; @@ -2105,7 +2105,7 @@ pub const CType = extern union { if (ty.isSimpleTuple()) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i), + .Struct => ty.structFieldName(field_i, mod), .Union => ty.unionFields().keys()[field_i], else => unreachable, }, @@ -2210,12 +2210,12 @@ pub const CType = extern union { const zig_ty_tag = ty.zigTypeTag(mod); for (0..switch (ty.zigTypeTag(mod)) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { @@ -2227,7 +2227,7 @@ pub const CType = extern union { hasher.update(if (ty.isSimpleTuple()) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i), + .Struct => ty.structFieldName(field_i, mod), .Union => ty.unionFields().keys()[field_i], else => unreachable, }); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 10cf66a69abc..6b12c447dcdf 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1986,8 +1986,7 @@ pub const Object = struct { const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(ty)) |struct_obj| { if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { assert(struct_obj.haveLayout()); const info = struct_obj.backing_int_ty.intInfo(mod); @@ -2075,8 +2074,7 @@ pub const Object = struct { return full_di_ty; } - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(ty)) |struct_obj| { if (!struct_obj.haveFieldTypes()) { // This can happen if a struct type 
makes it all the way to // flush() without ever being instantiated or referenced (even @@ -2105,8 +2103,8 @@ pub const Object = struct { return struct_di_ty; } - const fields = ty.structFields(); - const layout = ty.containerLayout(); + const fields = ty.structFields(mod); + const layout = ty.containerLayout(mod); var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; defer di_fields.deinit(gpa); @@ -2116,7 +2114,7 @@ pub const Object = struct { comptime assert(struct_layout_version == 2); var offset: u64 = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); + var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; const field_size = field.ty.abiSize(mod); @@ -2990,7 +2988,7 @@ pub const DeclGen = struct { return llvm_struct_ty; } - const struct_obj = t.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(t).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -3696,7 +3694,7 @@ pub const DeclGen = struct { } } - const struct_obj = tv.ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(tv.ty).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -4043,7 +4041,7 @@ pub const DeclGen = struct { const llvm_u32 = dg.context.intType(32); switch (parent_ty.zigTypeTag(mod)) { .Union => { - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { return parent_llvm_ptr; } @@ -4065,14 +4063,14 @@ pub const DeclGen = struct { return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .Struct => { - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { if (!byte_aligned) return parent_llvm_ptr; const llvm_usize = dg.context.intType(target.ptrBitWidth()); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); // count bits of fields before this one const prev_bits = b: { var b: usize = 0; - for (parent_ty.structFields().values()[0..field_index]) |field| { + for (parent_ty.structFields(mod).values()[0..field_index]) |field| { if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; b += @intCast(usize, field.ty.bitSize(mod)); } @@ -5983,7 +5981,7 @@ pub const FuncGen = struct { const struct_ty = self.typeOf(struct_field.struct_operand); const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); + const field_ty = struct_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { return null; } @@ -5991,9 +5989,9 @@ pub const FuncGen = struct { if (!isByRef(struct_ty, mod)) { assert(!isByRef(field_ty, mod)); switch (struct_ty.zigTypeTag(mod)) { - .Struct => switch (struct_ty.containerLayout()) { + .Struct => switch (struct_ty.containerLayout(mod)) { .Packed => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index); const containing_int = struct_llvm_val; const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); @@ -6019,7 +6017,7 @@ pub const FuncGen = struct { }, }, .Union => { - assert(struct_ty.containerLayout() == .Packed); + assert(struct_ty.containerLayout(mod) == .Packed); const containing_int = struct_llvm_val; const elem_llvm_ty = try self.dg.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or 
field_ty.zigTypeTag(mod) == .Vector) { @@ -6041,7 +6039,7 @@ pub const FuncGen = struct { switch (struct_ty.zigTypeTag(mod)) { .Struct => { - assert(struct_ty.containerLayout() != .Packed); + assert(struct_ty.containerLayout(mod) != .Packed); var ptr_ty_buf: Type.Payload.Pointer = undefined; const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); @@ -9289,8 +9287,8 @@ pub const FuncGen = struct { return vector; }, .Struct => { - if (result_ty.containerLayout() == .Packed) { - const struct_obj = result_ty.castTag(.@"struct").?.data; + if (result_ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(result_ty).?; assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); @@ -9795,7 +9793,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const struct_ty = struct_ptr_ty.childType(mod); switch (struct_ty.zigTypeTag(mod)) { - .Struct => switch (struct_ty.containerLayout()) { + .Struct => switch (struct_ty.containerLayout(mod)) { .Packed => { const result_ty = self.typeOfIndex(inst); const result_ty_info = result_ty.ptrInfo(mod); @@ -9838,7 +9836,7 @@ pub const FuncGen = struct { }, .Union => { const layout = struct_ty.unionGetLayout(mod); - if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr; + if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr; const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const union_llvm_ty = try self.dg.lowerType(struct_ty); const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, ""); @@ -10530,11 +10528,11 @@ fn llvmFieldIndex( } return null; } - const layout = ty.containerLayout(); + const layout = ty.containerLayout(mod); assert(layout != .Packed); var llvm_field_index: c_uint = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); + var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; const field_align = field.alignment(mod, layout); @@ -11113,7 +11111,7 @@ fn isByRef(ty: Type, mod: *Module) bool { .Array, .Frame => return ty.hasRuntimeBits(mod), .Struct => { // Packed structs are represented to LLVM as integers. 
- if (ty.containerLayout() == .Packed) return false; + if (ty.containerLayout(mod) == .Packed) return false; if (ty.isSimpleTupleOrAnonStruct()) { const tuple = ty.tupleFields(); var count: usize = 0; @@ -11127,7 +11125,7 @@ fn isByRef(ty: Type, mod: *Module) bool { return false; } var count: usize = 0; - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values()) |field| { if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; @@ -11137,7 +11135,7 @@ fn isByRef(ty: Type, mod: *Module) bool { } return false; }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Packed => return false, else => return ty.hasRuntimeBits(mod), }, @@ -11176,8 +11174,8 @@ fn isScalar(mod: *Module, ty: Type) bool { .Vector, => true, - .Struct => ty.containerLayout() == .Packed, - .Union => ty.containerLayout() == .Packed, + .Struct => ty.containerLayout(mod) == .Packed, + .Union => ty.containerLayout(mod) == .Packed, else => false, }; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 52f94cc6d555..41b523b8f468 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -685,7 +685,7 @@ pub const DeclGen = struct { if (ty.isSimpleTupleOrAnonStruct()) { unreachable; // TODO } else { - const struct_ty = ty.castTag(.@"struct").?.data; + const struct_ty = mod.typeToStruct(ty).?; if (struct_ty.layout == .Packed) { return dg.todo("packed struct constants", .{}); @@ -1306,7 +1306,7 @@ pub const DeclGen = struct { } }); } - const struct_ty = ty.castTag(.@"struct").?.data; + const struct_ty = mod.typeToStruct(ty).?; if (struct_ty.layout == .Packed) { return try self.resolveType(struct_ty.backing_int_ty, .direct); @@ -2576,7 +2576,7 @@ pub const DeclGen = struct { const struct_ty = self.typeOf(struct_field.struct_operand); const object_id = try self.resolve(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); + const field_ty = struct_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -2595,7 +2595,7 @@ pub const DeclGen = struct { const mod = self.module; const object_ty = object_ptr_ty.childType(mod); switch (object_ty.zigTypeTag(mod)) { - .Struct => switch (object_ty.containerLayout()) { + .Struct => switch (object_ty.containerLayout(mod)) { .Packed => unreachable, // TODO else => { const field_index_ty_ref = try self.intType(.unsigned, 32); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 0561ccbfdac2..7d033de58472 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -360,13 +360,13 @@ pub const DeclState = struct { dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendAssumeCapacity(0); - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; if (struct_obj.layout == .Packed) { log.debug("TODO implement .debug_info for packed structs", .{}); break :blk; } - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.keys(), 0..) 
|field_name, field_index| { const field = fields.get(field_name).?; if (!field.ty.hasRuntimeBits(mod)) continue; diff --git a/src/type.zig b/src/type.zig index 2870b5616f70..4e374a39d5bc 100644 --- a/src/type.zig +++ b/src/type.zig @@ -59,8 +59,6 @@ pub const Type = struct { .anyframe_T => return .AnyFrame, - .empty_struct, - .@"struct", .tuple, .anon_struct, => return .Struct, @@ -148,6 +146,7 @@ pub const Type = struct { .opt => unreachable, .enum_tag => unreachable, .simple_value => unreachable, + .aggregate => unreachable, }, } } @@ -501,16 +500,6 @@ pub const Type = struct { return a.elemType2(mod).eql(b.elemType2(mod), mod); }, - .empty_struct => { - const a_namespace = a.castTag(.empty_struct).?.data; - const b_namespace = (b.castTag(.empty_struct) orelse return false).data; - return a_namespace == b_namespace; - }, - .@"struct" => { - const a_struct_obj = a.castTag(.@"struct").?.data; - const b_struct_obj = (b.castTag(.@"struct") orelse return false).data; - return a_struct_obj == b_struct_obj; - }, .tuple => { if (!b.isSimpleTuple()) return false; @@ -720,15 +709,6 @@ pub const Type = struct { hashWithHasher(ty.childType(mod), hasher, mod); }, - .empty_struct => { - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - const namespace: *const Module.Namespace = ty.castTag(.empty_struct).?.data; - std.hash.autoHash(hasher, namespace); - }, - .@"struct" => { - const struct_obj: *const Module.Struct = ty.castTag(.@"struct").?.data; - std.hash.autoHash(hasher, struct_obj); - }, .tuple => { std.hash.autoHash(hasher, std.builtin.TypeId.Struct); @@ -955,8 +935,6 @@ pub const Type = struct { .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), - .empty_struct => return self.copyPayloadShallow(allocator, Payload.ContainerScope), - .@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct), .@"union", .union_safety_tagged, .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union), .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), @@ -1033,14 +1011,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .empty_struct => return writer.writeAll("struct {}"), - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), struct_obj.owner_decl, - }); - }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; return writer.print("({s} decl={d})", .{ @@ -1247,22 +1217,10 @@ pub const Type = struct { /// Prints a name suitable for `@typeName`. 
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { switch (ty.ip_index) { - .empty_struct_type => try writer.writeAll("@TypeOf(.{})"), - .none => switch (ty.tag()) { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .empty_struct => { - const namespace = ty.castTag(.empty_struct).?.data; - try namespace.renderFullyQualifiedName(mod, "", writer); - }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - const decl = mod.declPtr(struct_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; const decl = mod.declPtr(union_obj.owner_decl); @@ -1548,7 +1506,18 @@ pub const Type = struct { return; }, .simple_type => |s| return writer.writeAll(@tagName(s)), - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { + const decl = mod.declPtr(struct_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + } else if (struct_type.namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try namespace.renderFullyQualifiedName(mod, "", writer); + } else { + try writer.writeAll("@TypeOf(.{})"); + } + }, + .union_type => @panic("TODO"), .opaque_type => |opaque_type| { const decl = mod.declPtr(opaque_type.decl); @@ -1562,6 +1531,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -1624,12 +1594,10 @@ pub const Type = struct { }, // These are false because they are comptime-only types. - .empty_struct, // These are function *bodies*, not pointers. // Special exceptions have to be made when emitting functions due to // this returning false. - .function, - => return false, + .function => return false, .optional => { const child_ty = ty.optionalChild(mod); @@ -1646,28 +1614,6 @@ pub const Type = struct { } }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (struct_obj.status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - struct_obj.assumed_runtime_bits = true; - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(struct_obj.haveFieldTypes()), - .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy, - } - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); @@ -1824,7 +1770,31 @@ pub const Type = struct { .generic_poison => unreachable, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { + // This struct has no fields. + return false; + }; + if (struct_obj.status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. 
+ struct_obj.assumed_runtime_bits = true; + return true; + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(struct_obj.haveFieldTypes()), + .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy, + } + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + .union_type => @panic("TODO"), .opaque_type => true, @@ -1835,6 +1805,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -1862,7 +1833,6 @@ pub const Type = struct { .anyframe_T, .tuple, .anon_struct, - .empty_struct, => false, .enum_full, @@ -1877,7 +1847,6 @@ pub const Type = struct { => ty.childType(mod).hasWellDefinedLayout(mod), .optional => ty.isPtrLikeOptional(mod), - .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, .union_tagged => false, }, @@ -1936,7 +1905,13 @@ pub const Type = struct { .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { + // Struct with no fields has a well-defined layout of no bits. + return true; + }; + return struct_obj.layout != .Auto; + }, .union_type => @panic("TODO"), .opaque_type => false, @@ -1947,6 +1922,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -2146,68 +2122,6 @@ pub const Type = struct { .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (opt_sema) |sema| { - if (struct_obj.status == .field_types_wip) { - // We'll guess "pointer-aligned", if the struct has an - // underaligned pointer field then some allocations - // might require explicit alignment. 
- return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; - } - _ = try sema.resolveTypeFields(ty); - } - if (!struct_obj.haveFieldTypes()) switch (strat) { - .eager => unreachable, // struct layout not resolved - .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }; - if (struct_obj.layout == .Packed) { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - } - }, - .eager => {}, - } - assert(struct_obj.haveLayout()); - return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; - } - - const fields = ty.structFields(); - var big_align: u32 = 0; - for (fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, - else => |e| return e, - })) continue; - - const field_align = if (field.abi_align != 0) - field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |a| a, - .val => switch (strat) { - .eager => unreachable, // struct layout not resolved - .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, - }; - big_align = @max(big_align, field_align); - - // This logic is duplicated in Module.Struct.Field.alignment. - if (struct_obj.layout == .Extern or target.ofmt == .c) { - if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { - // The C ABI requires 128 bit integer fields of structs - // to be 16-bytes aligned. - big_align = @max(big_align, 16); - } - } - } - return AbiAlignmentAdvanced{ .scalar = big_align }; - }, - .tuple, .anon_struct => { const tuple = ty.tupleFields(); var big_align: u32 = 0; @@ -2241,8 +2155,6 @@ pub const Type = struct { return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true); }, - .empty_struct => return AbiAlignmentAdvanced{ .scalar = 0 }, - .inferred_alloc_const, .inferred_alloc_mut, => unreachable, @@ -2337,7 +2249,69 @@ pub const Type = struct { .generic_poison => unreachable, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiAlignmentAdvanced{ .scalar = 0 }; + + if (opt_sema) |sema| { + if (struct_obj.status == .field_types_wip) { + // We'll guess "pointer-aligned", if the struct has an + // underaligned pointer field then some allocations + // might require explicit alignment. 
+ return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + } + _ = try sema.resolveTypeFields(ty); + } + if (!struct_obj.haveFieldTypes()) switch (strat) { + .eager => unreachable, // struct layout not resolved + .sema => unreachable, // handled above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }; + if (struct_obj.layout == .Packed) { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + if (!struct_obj.haveLayout()) { + return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; + } + }, + .eager => {}, + } + assert(struct_obj.haveLayout()); + return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; + } + + const fields = ty.structFields(mod); + var big_align: u32 = 0; + for (fields.values()) |field| { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + else => |e| return e, + })) continue; + + const field_align = if (field.abi_align != 0) + field.abi_align + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |a| a, + .val => switch (strat) { + .eager => unreachable, // struct layout not resolved + .sema => unreachable, // handled above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }, + }; + big_align = @max(big_align, field_align); + + // This logic is duplicated in Module.Struct.Field.alignment. + if (struct_obj.layout == .Extern or target.ofmt == .c) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { + // The C ABI requires 128 bit integer fields of structs + // to be 16-bytes aligned. 
+ big_align = @max(big_align, 16); + } + } + } + return AbiAlignmentAdvanced{ .scalar = big_align }; + }, .union_type => @panic("TODO"), .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, @@ -2348,6 +2322,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -2517,42 +2492,16 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .empty_struct => return AbiSizeAdvanced{ .scalar = 0 }, - - .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { - .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, - .eager => {}, - } - assert(struct_obj.haveLayout()); - return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; - }, - else => { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - } - }, - .eager => {}, - } - const field_count = ty.structFieldCount(); - if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; - }, + .tuple, .anon_struct => { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy, .eager => {}, + } + const field_count = ty.structFieldCount(mod); + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { @@ -2752,7 +2701,42 @@ pub const Type = struct { .generic_poison => unreachable, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| switch (ty.containerLayout(mod)) { + .Packed => { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiSizeAdvanced{ .scalar = 0 }; + + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + if (!struct_obj.haveLayout()) { + return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; + } + }, + .eager => {}, + } + assert(struct_obj.haveLayout()); + return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; + }, + else => { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiSizeAdvanced{ .scalar = 0 }; + if (!struct_obj.haveLayout()) { + return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; + } + }, + .eager => {}, + } + const field_count = ty.structFieldCount(mod); + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + }, + }, .union_type => @panic("TODO"), .opaque_type => unreachable, // no size available @@ -2763,6 +2747,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -2850,189 +2835,189 @@ pub const Type = struct { ) Module.CompileError!u64 { const target = mod.getTarget(); - if (ty.ip_index != .none) 
switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.bits, - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth() * 2, - }, - .array_type => |array_type| { - const len = array_type.len + @boolToInt(array_type.sentinel != .none); - if (len == 0) return 0; - const elem_ty = array_type.child.toType(); - const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); - if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); - return (len - 1) * 8 * elem_size + elem_bit_size; - }, - .vector_type => |vector_type| { - const child_ty = vector_type.child.toType(); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); - return elem_bit_size * vector_type.len; - }, - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), - .simple_type => |t| switch (t) { - .f16 => return 16, - .f32 => return 32, - .f64 => return 64, - .f80 => return 80, - .f128 => return 128, - - .usize, - .isize, - .@"anyframe", - => return target.ptrBitWidth(), - - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), - - .bool => return 1, - .void => return 0, - - // TODO revisit this when we have the concept of the error tag type - .anyerror => return 16, - - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .comptime_float => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, - .generic_poison => unreachable, - .var_args_param => unreachable, - - .atomic_order => unreachable, // missing call to resolveTypeFields - .atomic_rmw_op => unreachable, // missing call to resolveTypeFields - .calling_convention => unreachable, // missing call to resolveTypeFields - .address_space => unreachable, // missing call to resolveTypeFields - .float_mode => unreachable, // missing call to resolveTypeFields - .reduce_op => unreachable, // missing call to resolveTypeFields - .call_modifier => unreachable, // missing call to resolveTypeFields - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - .type_info => unreachable, // missing call to resolveTypeFields - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .opaque_type => unreachable, - - // values, not types - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - }; - const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - switch (ty.tag()) { - .function => unreachable, // represents machine code; not a pointer - .empty_struct => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - .@"struct" => { - const struct_obj = 
ty.castTag(.@"struct").?.data; - if (struct_obj.layout != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); - assert(struct_obj.haveLayout()); - return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); - }, - - .tuple, .anon_struct => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - var total: u64 = 0; - for (ty.tupleFields().types) |field_ty| { - total += try bitSizeAdvanced(field_ty, mod, opt_sema); - } - return total; - }, + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .function => unreachable, // represents machine code; not a pointer + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); - }, + .tuple, .anon_struct => { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + if (ty.containerLayout(mod) != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + var total: u64 = 0; + for (ty.tupleFields().types) |field_ty| { + total += try bitSizeAdvanced(field_ty, mod, opt_sema); + } + return total; + }, - .@"union", .union_safety_tagged, .union_tagged => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - const union_obj = ty.cast(Payload.Union).?.data; - assert(union_obj.haveFieldTypes()); + .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { + const int_tag_ty = try ty.intTagType(mod); + return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); + }, - var size: u64 = 0; - for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); - } - return size; - }, + .@"union", .union_safety_tagged, .union_tagged => { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + if (ty.containerLayout(mod) != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + const union_obj = ty.cast(Payload.Union).?.data; + assert(union_obj.haveFieldTypes()); - .array => { - const payload = ty.castTag(.array).?.data; - const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); - if (elem_size == 0 or payload.len == 0) - return @as(u64, 0); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return (payload.len - 1) * 8 * elem_size + elem_bit_size; - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - const elem_size = std.math.max( - payload.elem_type.abiAlignment(mod), - payload.elem_type.abiSize(mod), - ); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return payload.len * 8 * elem_size + elem_bit_size; - }, + var size: u64 = 0; + for (union_obj.fields.values()) |field| { + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); + } + return size; + }, + + .array => { + const payload = ty.castTag(.array).?.data; + const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); + if (elem_size == 0 or payload.len == 0) + return @as(u64, 0); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); + return (payload.len - 1) * 8 * elem_size + elem_bit_size; + }, + .array_sentinel => { 
+ const payload = ty.castTag(.array_sentinel).?.data; + const elem_size = std.math.max( + payload.elem_type.abiAlignment(mod), + payload.elem_type.abiSize(mod), + ); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); + return payload.len * 8 * elem_size + elem_bit_size; + }, - .anyframe_T => return target.ptrBitWidth(), + .anyframe_T => return target.ptrBitWidth(), - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth(), + .pointer => switch (ty.castTag(.pointer).?.data.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), + }, + + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + => return 16, // TODO revisit this when we have the concept of the error tag type + + .optional, .error_union => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits, + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth() * 2, + }, + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + if (len == 0) return 0; + const elem_ty = array_type.child.toType(); + const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, + .vector_type => |vector_type| { + const child_ty = vector_type.child.toType(); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + }, + .opt_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => return 16, // TODO revisit this when we have the concept of the error tag type + .usize, + .isize, + .@"anyframe", + => return target.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return 16, + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + .var_args_param => unreachable, + + .atomic_order => unreachable, // missing call to resolveTypeFields + .atomic_rmw_op => unreachable, // missing call to resolveTypeFields + .calling_convention => unreachable, // missing call to 
resolveTypeFields + .address_space => unreachable, // missing call to resolveTypeFields + .float_mode => unreachable, // missing call to resolveTypeFields + .reduce_op => unreachable, // missing call to resolveTypeFields + .call_modifier => unreachable, // missing call to resolveTypeFields + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + .type_info => unreachable, // missing call to resolveTypeFields + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; + if (struct_obj.layout != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); + assert(struct_obj.haveLayout()); + return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); + }, + + .union_type => @panic("TODO"), + .opaque_type => unreachable, - .optional, .error_union => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + // values, not types + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .ptr => unreachable, + .opt => unreachable, + .enum_tag => unreachable, + .aggregate => unreachable, }, } } /// Returns true if the type's layout is already resolved and it is safe /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type, mod: *const Module) bool { + pub fn layoutIsResolved(ty: Type, mod: *Module) bool { switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.castTag(.@"struct")) |struct_ty| { - return struct_ty.data.haveLayout(); + if (mod.typeToStruct(ty)) |struct_obj| { + return struct_obj.haveLayout(); } return true; }, @@ -3500,18 +3485,23 @@ pub const Type = struct { } } - pub fn containerLayout(ty: Type) std.builtin.Type.ContainerLayout { + pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { return switch (ty.ip_index) { .empty_struct_type => .Auto, .none => switch (ty.tag()) { .tuple, .anon_struct => .Auto, - .@"struct" => ty.castTag(.@"struct").?.data.layout, .@"union" => ty.castTag(.@"union").?.data.layout, .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.layout, .union_tagged => ty.castTag(.union_tagged).?.data.layout, else => unreachable, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; + return struct_obj.layout; + }, + else => unreachable, + }, }; } @@ -3631,14 +3621,16 @@ pub const Type = struct { .array_sentinel => ty.castTag(.array_sentinel).?.data.len, .tuple => ty.castTag(.tuple).?.data.types.len, .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), - .empty_struct => 0, else => unreachable, }, else => switch (ip.indexToKey(ty.ip_index)) { .vector_type => |vector_type| vector_type.len, .array_type => |array_type| array_type.len, + .struct_type => |struct_type| { + const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0; + return struct_obj.fields.count(); + }, else => unreachable, }, }; @@ -3665,11 +3657,9 @@ pub const Type = struct { /// Asserts the type is an array, pointer or vector. 
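An aside on the packed-struct branch above: the bit size of a packed struct is defined to be the bit size of its backing integer, which is what the `backing_int_ty.bitSizeAdvanced` call computes. A minimal user-level sketch of that invariant, assuming a Zig version with explicit backing integers (the type `Flags` is hypothetical):

    const std = @import("std");

    // A packed struct's in-memory representation is exactly its backing
    // integer, so the bit size ignores any notion of per-field padding.
    const Flags = packed struct(u24) {
        a: u8,
        b: u16,
    };

    comptime {
        std.debug.assert(@bitSizeOf(Flags) == 24);
        std.debug.assert(@typeInfo(Flags).Struct.backing_integer.? == u24);
    }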
pub fn sentinel(ty: Type, mod: *const Module) ?Value { return switch (ty.ip_index) { - .empty_struct_type => null, .none => switch (ty.tag()) { .array, .tuple, - .@"struct", => null, .pointer => ty.castTag(.pointer).?.data.sentinel, @@ -3721,16 +3711,16 @@ pub const Type = struct { /// Returns true for integers, enums, error sets, and packed structs. /// If this function returns true, then intInfo() can be called on the type. - pub fn isAbiInt(ty: Type, mod: *const Module) bool { + pub fn isAbiInt(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Int, .Enum, .ErrorSet => true, - .Struct => ty.containerLayout() == .Packed, + .Struct => ty.containerLayout(mod) == .Packed, else => false, }; } /// Asserts the type is an integer, enum, error set, or vector of one of them. - pub fn intInfo(starting_ty: Type, mod: *const Module) InternPool.Key.IntType { + pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { const target = mod.getTarget(); var ty = starting_ty; @@ -3750,12 +3740,6 @@ pub const Type = struct { return .{ .signedness = .unsigned, .bits = 16 }; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout == .Packed); - ty = struct_obj.backing_int_ty; - }, - else => unreachable, }, .anyerror_type => { @@ -3775,6 +3759,12 @@ pub const Type = struct { .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.layout == .Packed); + ty = struct_obj.backing_int_ty; + }, + .ptr_type => unreachable, .array_type => unreachable, .vector_type => |vector_type| ty = vector_type.child.toType(), @@ -3782,7 +3772,7 @@ pub const Type = struct { .opt_type => unreachable, .error_union_type => unreachable, .simple_type => unreachable, // handled via Index enum tag above - .struct_type => @panic("TODO"), + .union_type => unreachable, .opaque_type => unreachable, @@ -3793,6 +3783,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -3996,17 +3987,6 @@ pub const Type = struct { } }, - .@"struct" => { - const s = ty.castTag(.@"struct").?.data; - assert(s.haveFieldTypes()); - for (s.fields.values()) |field| { - if (field.is_comptime) continue; - if ((try field.ty.onePossibleValue(mod)) != null) continue; - return null; - } - return Value.empty_struct; - }, - .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.values, 0..) |val, i| { @@ -4069,8 +4049,6 @@ pub const Type = struct { return Value.empty_struct; }, - .empty_struct => return Value.empty_struct, - .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); @@ -4158,7 +4136,23 @@ pub const Type = struct { .generic_poison => unreachable, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + if (mod.structPtrUnwrap(struct_type.index)) |s| { + assert(s.haveFieldTypes()); + for (s.fields.values()) |field| { + if (field.is_comptime) continue; + if ((try field.ty.onePossibleValue(mod)) != null) continue; + return null; + } + } + // In this case the struct has no fields and therefore has one possible value. 
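To make the comment above concrete: a struct with no fields admits exactly one value, so it needs no runtime storage at all. A user-level sketch of the property that `onePossibleValue` reports to the backends (the name `Empty` is hypothetical):

    const std = @import("std");

    const Empty = struct {};

    comptime {
        // Only one value of `Empty` exists, so it occupies zero bytes.
        std.debug.assert(@sizeOf(Empty) == 0);
    }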
+ const empty = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .fields = &.{}, + } }); + return empty.toValue(); + }, + .union_type => @panic("TODO"), .opaque_type => return null, @@ -4169,6 +4163,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -4177,12 +4172,11 @@ pub const Type = struct { /// resolves field types rather than asserting they are already resolved. /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. - pub fn comptimeOnly(ty: Type, mod: *const Module) bool { + pub fn comptimeOnly(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { .empty_struct_type => false, .none => switch (ty.tag()) { - .empty_struct, .error_set, .error_set_single, .error_set_inferred, @@ -4222,20 +4216,6 @@ pub const Type = struct { return false; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .wip, .unknown => { - // Return false to avoid incorrect dependency loops. - // This will be handled correctly once merged with - // `Sema.typeRequiresComptime`. - return false; - }, - .no => return false, - .yes => return true, - } - }, - .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; switch (union_obj.requires_comptime) { @@ -4326,7 +4306,21 @@ pub const Type = struct { .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + // A struct with no fields is not comptime-only. + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch (struct_obj.requires_comptime) { + .wip, .unknown => { + // Return false to avoid incorrect dependency loops. + // This will be handled correctly once merged with + // `Sema.typeRequiresComptime`. 
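The `requires_comptime` checks here and below read a memoized multi-state flag rather than a bool so that an in-flight query can answer conservatively. A sketch of the lookup, assuming `PropertyBoolean` has the four states used in this hunk (the helper name is hypothetical):

    const PropertyBoolean = enum { unknown, wip, no, yes };

    fn knownComptimeOnly(requires_comptime: PropertyBoolean) bool {
        return switch (requires_comptime) {
            // Answering false while the query is unresolved or in flight
            // avoids a dependency loop; Sema refines the answer later.
            .unknown, .wip => false,
            .no => false,
            .yes => true,
        };
    }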
+ return false; + }, + .no => return false, + .yes => return true, + } + }, + .union_type => @panic("TODO"), .opaque_type => false, @@ -4337,6 +4331,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -4352,19 +4347,19 @@ pub const Type = struct { }; } - pub fn isIndexable(ty: Type, mod: *const Module) bool { + pub fn isIndexable(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize(mod)) { .Slice, .Many, .C => true, .One => ty.childType(mod).zigTypeTag(mod) == .Array, }, - .Struct => ty.isTuple(), + .Struct => ty.isTuple(mod), else => false, }; } - pub fn indexableHasLen(ty: Type, mod: *const Module) bool { + pub fn indexableHasLen(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize(mod)) { @@ -4372,7 +4367,7 @@ pub const Type = struct { .Slice => true, .One => ty.childType(mod).zigTypeTag(mod) == .Array, }, - .Struct => ty.isTuple(), + .Struct => ty.isTuple(mod), else => false, }; } @@ -4381,10 +4376,8 @@ pub const Type = struct { pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .@"struct" => ty.castTag(.@"struct").?.data.namespace.toOptional(), .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(), .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(), - .empty_struct => @panic("TODO"), .@"union" => ty.castTag(.@"union").?.data.namespace.toOptional(), .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.namespace.toOptional(), .union_tagged => ty.castTag(.union_tagged).?.data.namespace.toOptional(), @@ -4393,6 +4386,7 @@ pub const Type = struct { }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + .struct_type => |struct_type| struct_type.namespace, else => .none, }, }; @@ -4618,161 +4612,188 @@ pub const Type = struct { } } - pub fn structFields(ty: Type) Module.Struct.Fields { - return switch (ty.ip_index) { - .empty_struct_type => .{}, - .none => switch (ty.tag()) { - .empty_struct => .{}, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields; - }, - else => unreachable, + pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{}; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields; }, else => unreachable, - }; + } } - pub fn structFieldName(ty: Type, field_index: usize) []const u8 { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields.keys()[field_index]; + pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index], + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields.keys()[field_index]; + }, + else => unreachable, }, - 
.anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index], - else => unreachable, } } - pub fn structFieldCount(ty: Type) usize { + pub fn structFieldCount(ty: Type, mod: *Module) usize { return switch (ty.ip_index) { .empty_struct_type => 0, .none => switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; + .tuple => ty.castTag(.tuple).?.data.types.len, + .anon_struct => ty.castTag(.anon_struct).?.data.types.len, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; assert(struct_obj.haveFieldTypes()); return struct_obj.fields.count(); }, - .empty_struct => 0, - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, else => unreachable, }, - else => unreachable, }; } /// Supports structs and unions. - pub fn structFieldType(ty: Type, index: usize) Type { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.fields.values()[index].ty; + pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + return union_obj.fields.values()[index].ty; + }, + .tuple => return ty.castTag(.tuple).?.data.types[index], + .anon_struct => return ty.castTag(.anon_struct).?.data.types[index], + else => unreachable, }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].ty; + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.fields.values()[index].ty; + }, + else => unreachable, }, - .tuple => return ty.castTag(.tuple).?.data.types[index], - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index], - else => unreachable, - } + }; } pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout != .Packed); - return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + return union_obj.fields.values()[index].normalAlignment(mod); + }, + .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), + .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), + else => unreachable, }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].normalAlignment(mod); + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.layout != .Packed); + return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); + }, + else => unreachable, }, - .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), - else => unreachable, } } - pub fn 
structFieldDefaultValue(ty: Type, index: usize) Value { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.fields.values()[index].default_val; - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - return tuple.values[index]; + pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + return tuple.values[index]; + }, + .anon_struct => { + const struct_obj = ty.castTag(.anon_struct).?.data; + return struct_obj.values[index]; + }, + else => unreachable, }, - .anon_struct => { - const struct_obj = ty.castTag(.anon_struct).?.data; - return struct_obj.values[index]; + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.fields.values()[index].default_val; + }, + else => unreachable, }, - else => unreachable, } } pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - const field = struct_obj.fields.values()[index]; - if (field.is_comptime) { - return field.default_val; - } else { - return field.ty.onePossibleValue(mod); - } - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - const val = tuple.values[index]; - if (val.ip_index == .unreachable_value) { - return tuple.types[index].onePossibleValue(mod); - } else { - return val; - } + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + const val = tuple.values[index]; + if (val.ip_index == .unreachable_value) { + return tuple.types[index].onePossibleValue(mod); + } else { + return val; + } + }, + .anon_struct => { + const anon_struct = ty.castTag(.anon_struct).?.data; + const val = anon_struct.values[index]; + if (val.ip_index == .unreachable_value) { + return anon_struct.types[index].onePossibleValue(mod); + } else { + return val; + } + }, + else => unreachable, }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - const val = anon_struct.values[index]; - if (val.ip_index == .unreachable_value) { - return anon_struct.types[index].onePossibleValue(mod); - } else { - return val; - } + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field = struct_obj.fields.values()[index]; + if (field.is_comptime) { + return field.default_val; + } else { + return field.ty.onePossibleValue(mod); + } + }, + else => unreachable, }, - else => unreachable, } } - pub fn structFieldIsComptime(ty: Type, index: usize) bool { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (struct_obj.layout == .Packed) return false; - const field = struct_obj.fields.values()[index]; - return field.is_comptime; - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - const val = tuple.values[index]; - return val.ip_index != .unreachable_value; + pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + const val = tuple.values[index]; + return val.ip_index != .unreachable_value; + }, + .anon_struct => { + const anon_struct = ty.castTag(.anon_struct).?.data; + 
const val = anon_struct.values[index]; + return val.ip_index != .unreachable_value; + }, + else => unreachable, }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - const val = anon_struct.values[index]; - return val.ip_index != .unreachable_value; + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + if (struct_obj.layout == .Packed) return false; + const field = struct_obj.fields.values()[index]; + return field.is_comptime; + }, + else => unreachable, }, - else => unreachable, } } pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.layout == .Packed); comptime assert(Type.packed_struct_layout_version == 2); @@ -4833,7 +4854,8 @@ pub const Type = struct { /// Get an iterator that iterates over all the struct field, returning the field and /// offset of that field. Asserts that the type is a non-packed struct. pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.haveLayout()); assert(struct_obj.layout != .Packed); return .{ .struct_obj = struct_obj, .module = mod }; @@ -4841,57 +4863,62 @@ pub const Type = struct { /// Supports structs and unions. pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.haveLayout()); - assert(struct_obj.layout != .Packed); - var it = ty.iterateStructOffsets(mod); - while (it.next()) |field_offset| { - if (index == field_offset.field) - return field_offset.offset; - } - - return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); - }, + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); + var offset: u64 = 0; + var big_align: u32 = 0; - var offset: u64 = 0; - var big_align: u32 = 0; + for (tuple.types, 0..) |field_ty, i| { + const field_val = tuple.values[i]; + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { + // comptime field + if (i == index) return offset; + continue; + } - for (tuple.types, 0..) 
|field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { - // comptime field + const field_align = field_ty.abiAlignment(mod); + big_align = @max(big_align, field_align); + offset = std.mem.alignForwardGeneric(u64, offset, field_align); if (i == index) return offset; - continue; + offset += field_ty.abiSize(mod); } + offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); + return offset; + }, - const field_align = field_ty.abiAlignment(mod); - big_align = @max(big_align, field_align); - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - if (i == index) return offset; - offset += field_ty.abiSize(mod); - } - offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); - return offset; + .@"union" => return 0, + .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + const layout = union_obj.getLayout(mod, true); + if (layout.tag_align >= layout.payload_align) { + // {Tag, Payload} + return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + } else { + // {Payload, Tag} + return 0; + } + }, + else => unreachable, }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveLayout()); + assert(struct_obj.layout != .Packed); + var it = ty.iterateStructOffsets(mod); + while (it.next()) |field_offset| { + if (index == field_offset.field) + return field_offset.offset; + } - .@"union" => return 0, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - const layout = union_obj.getLayout(mod, true); - if (layout.tag_align >= layout.payload_align) { - // {Tag, Payload} - return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); - } else { - // {Payload, Tag} - return 0; - } + return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); + }, + + else => unreachable, }, - else => unreachable, } } @@ -4901,6 +4928,7 @@ pub const Type = struct { pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc { switch (ty.ip_index) { + .empty_struct_type => return null, .none => switch (ty.tag()) { .enum_full, .enum_nonexhaustive => { const enum_full = ty.cast(Payload.EnumFull).?.data; @@ -4914,10 +4942,6 @@ pub const Type = struct { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.srcLoc(mod); }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.srcLoc(mod); - }, .error_set => { const error_set = ty.castTag(.error_set).?.data; return error_set.srcLoc(mod); @@ -4930,7 +4954,10 @@ pub const Type = struct { else => return null, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.srcLoc(mod); + }, .union_type => @panic("TODO"), .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), else => null, @@ -4954,10 +4981,6 @@ pub const Type = struct { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.owner_decl; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.owner_decl; - }, .error_set => { const error_set = ty.castTag(.error_set).?.data; return error_set.owner_decl; @@ -4970,7 +4993,10 @@ pub const Type = struct { else => return null, }, else 
=> return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null; + return struct_obj.owner_decl; + }, .union_type => @panic("TODO"), .opaque_type => |opaque_type| opaque_type.decl, else => null, @@ -5013,8 +5039,6 @@ pub const Type = struct { /// The type is the inferred error set of a specific function. error_set_inferred, error_set_merged, - empty_struct, - @"struct", @"union", union_safety_tagged, union_tagged, @@ -5046,12 +5070,10 @@ pub const Type = struct { .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, - .@"struct" => Payload.Struct, .@"union", .union_safety_tagged, .union_tagged => Payload.Union, .enum_full, .enum_nonexhaustive => Payload.EnumFull, .enum_simple => Payload.EnumSimple, .enum_numbered => Payload.EnumNumbered, - .empty_struct => Payload.ContainerScope, .tuple => Payload.Tuple, .anon_struct => Payload.AnonStruct, }; @@ -5082,15 +5104,19 @@ pub const Type = struct { } }; - pub fn isTuple(ty: Type) bool { + pub fn isTuple(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { - .empty_struct_type => true, .none => switch (ty.tag()) { .tuple => true, - .@"struct" => ty.castTag(.@"struct").?.data.is_tuple, else => false, }, - else => false, // TODO struct + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + return struct_obj.is_tuple; + }, + else => false, + }, }; } @@ -5101,36 +5127,41 @@ pub const Type = struct { .anon_struct => true, else => false, }, - else => false, // TODO struct + else => false, }; } - pub fn isTupleOrAnonStruct(ty: Type) bool { + pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { .empty_struct_type => true, .none => switch (ty.tag()) { .tuple, .anon_struct => true, - .@"struct" => ty.castTag(.@"struct").?.data.is_tuple, else => false, }, - else => false, // TODO struct + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + return struct_obj.is_tuple; + }, + else => false, + }, }; } pub fn isSimpleTuple(ty: Type) bool { return switch (ty.ip_index) { - .empty_struct => true, + .empty_struct_type => true, .none => switch (ty.tag()) { .tuple => true, else => false, }, - else => false, // TODO + else => false, }; } pub fn isSimpleTupleOrAnonStruct(ty: Type) bool { return switch (ty.ip_index) { - .empty_struct => true, + .empty_struct_type => true, .none => switch (ty.tag()) { .tuple, .anon_struct => true, else => false, @@ -5142,7 +5173,7 @@ pub const Type = struct { // Only allowed for simple tuple types pub fn tupleFields(ty: Type) Payload.Tuple.Data { return switch (ty.ip_index) { - .empty_struct => .{ .types = &.{}, .values = &.{} }, + .empty_struct_type => .{ .types = &.{}, .values = &.{} }, .none => switch (ty.tag()) { .tuple => ty.castTag(.tuple).?.data, .anon_struct => .{ @@ -5319,18 +5350,6 @@ pub const Type = struct { data: []const u8, }; - /// Mostly used for namespace like structs with zero fields. - /// Most commonly used for files. 
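Much of this hunk deletes the legacy `Type.Tag`/`Payload` encoding that the InternPool replaces. For orientation, that encoding recovers a concrete payload from a tagged base pointer roughly as follows (a simplified sketch, not the real definitions):

    const Tag = enum { tuple, anon_struct };

    const Payload = struct {
        tag: Tag,
    };

    const Tuple = struct {
        base: Payload = .{ .tag = .tuple },
        data: []const u8, // stand-in for the real field data
    };

    // castTag-style lookup: check the tag, then recover the containing
    // object from its embedded `base` field.
    fn castTag(base: *Payload, comptime t: Tag, comptime T: type) ?*T {
        if (base.tag != t) return null;
        return @fieldParentPtr(T, "base", base);
    }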
- pub const ContainerScope = struct { - base: Payload, - data: *Module.Namespace, - }; - - pub const Struct = struct { - base: Payload = .{ .tag = .@"struct" }, - data: *Module.Struct, - }; - pub const Tuple = struct { base: Payload = .{ .tag = .tuple }, data: Data, diff --git a/src/value.zig b/src/value.zig index c95f218dbe44..3992888b3d77 100644 --- a/src/value.zig +++ b/src/value.zig @@ -996,10 +996,10 @@ pub const Value = struct { const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, .Extern => { - const fields = ty.structFields().values(); + const fields = ty.structFields(mod).values(); const field_vals = val.castTag(.aggregate).?.data; for (fields, 0..) |field, i| { const off = @intCast(usize, ty.structFieldOffset(i, mod)); @@ -1017,7 +1017,7 @@ pub const Value = struct { const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?; std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, .Extern => return error.Unimplemented, .Packed => { @@ -1119,12 +1119,12 @@ pub const Value = struct { bits += elem_bit_size; } }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already .Extern => unreachable, // Handled in non-packed writeToMemory .Packed => { var bits: u16 = 0; - const fields = ty.structFields().values(); + const fields = ty.structFields(mod).values(); const field_vals = val.castTag(.aggregate).?.data; for (fields, 0..) |field, i| { const field_bits = @intCast(u16, field.ty.bitSize(mod)); @@ -1133,7 +1133,7 @@ pub const Value = struct { } }, }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already .Extern => unreachable, // Handled in non-packed writeToMemory .Packed => { @@ -1236,14 +1236,14 @@ pub const Value = struct { const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already .Extern => { - const fields = ty.structFields().values(); + const fields = ty.structFields(mod).values(); const field_vals = try arena.alloc(Value, fields.len); for (fields, 0..) 
|field, i| { const off = @intCast(usize, ty.structFieldOffset(i, mod)); - const sz = @intCast(usize, ty.structFieldType(i).abiSize(mod)); + const sz = @intCast(usize, ty.structFieldType(i, mod).abiSize(mod)); field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena); } return Tag.aggregate.create(arena, field_vals); @@ -1346,12 +1346,12 @@ pub const Value = struct { } return Tag.aggregate.create(arena, elems); }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already .Extern => unreachable, // Handled by non-packed readFromMemory .Packed => { var bits: u16 = 0; - const fields = ty.structFields().values(); + const fields = ty.structFields(mod).values(); const field_vals = try arena.alloc(Value, fields.len); for (fields, 0..) |field, i| { const field_bits = @intCast(u16, field.ty.bitSize(mod)); @@ -1996,7 +1996,7 @@ pub const Value = struct { } if (ty.zigTypeTag(mod) == .Struct) { - const fields = ty.structFields().values(); + const fields = ty.structFields(mod).values(); assert(fields.len == a_field_vals.len); for (fields, 0..) |field, i| { if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) { @@ -2019,7 +2019,7 @@ pub const Value = struct { .@"union" => { const a_union = a.castTag(.@"union").?.data; const b_union = b.castTag(.@"union").?.data; - switch (ty.containerLayout()) { + switch (ty.containerLayout(mod)) { .Packed, .Extern => { const tag_ty = ty.unionTagTypeHypothetical(); if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) { @@ -2252,7 +2252,7 @@ pub const Value = struct { .aggregate => { const field_values = val.castTag(.aggregate).?.data; for (field_values, 0..) 
|field_val, i| { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); field_val.hash(field_ty, hasher, mod); } }, @@ -2623,7 +2623,7 @@ pub const Value = struct { const data = val.castTag(.field_ptr).?.data; if (data.container_ptr.pointerDecl()) |decl_index| { const container_decl = mod.declPtr(decl_index); - const field_type = data.container_ty.structFieldType(data.field_index); + const field_type = data.container_ty.structFieldType(data.field_index, mod); const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index); return field_val.elemValue(mod, index); } else unreachable; @@ -2758,16 +2758,6 @@ pub const Value = struct { pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value { switch (val.ip_index) { .undef => return Value.undef, - .empty_struct => { - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - return tuple.values[index]; - } - if (try ty.structFieldValueComptime(mod, index)) |some| { - return some; - } - unreachable; - }, .none => switch (val.tag()) { .aggregate => { @@ -2784,7 +2774,10 @@ pub const Value = struct { else => unreachable, }, - else => unreachable, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .aggregate => |aggregate| aggregate.fields[index].toValue(), + else => unreachable, + }, } } From 3ba099bfba9d3c38fe188010aa82fc589b1cabf6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 May 2023 17:21:22 -0700 Subject: [PATCH 058/205] stage2: move union types and values to InternPool --- src/InternPool.zig | 173 +++++++++++++--- src/Module.zig | 77 +++++-- src/Sema.zig | 381 ++++++++++++++++++---------------- src/TypedValue.zig | 4 +- src/arch/aarch64/abi.zig | 4 +- src/arch/arm/abi.zig | 4 +- src/arch/wasm/CodeGen.zig | 10 +- src/arch/wasm/abi.zig | 10 +- src/arch/x86_64/CodeGen.zig | 4 +- src/arch/x86_64/abi.zig | 2 +- src/codegen.zig | 6 +- src/codegen/c.zig | 94 +++++---- src/codegen/c/type.zig | 26 +-- src/codegen/llvm.zig | 14 +- src/codegen/spirv.zig | 10 +- src/link/Dwarf.zig | 4 +- src/type.zig | 399 ++++++++++++++++-------------------- src/value.zig | 12 +- 18 files changed, 688 insertions(+), 546 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 315865c96619..4c4e3ab78a4c 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -21,6 +21,13 @@ allocated_structs: std.SegmentedList(Module.Struct, 0) = .{}, /// When a Struct object is freed from `allocated_structs`, it is pushed into this stack. structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{}, +/// Union objects are stored in this data structure because: +/// * They contain pointers such as the field maps. +/// * They need to be mutated after creation. +allocated_unions: std.SegmentedList(Module.Union, 0) = .{}, +/// When a Union object is freed from `allocated_unions`, it is pushed into this stack. +unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, + const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -59,10 +66,7 @@ pub const Key = union(enum) { /// If `empty_struct_type` is handled separately, then this value may be /// safely assumed to never be `none`. 
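`std.SegmentedList` is the right container for the rationale in the comment above because it never relocates existing elements as it grows, so `*Module.Union` pointers handed out by `unionPtr` remain valid while new unions are interned. A self-contained sketch of that stability property:

    const std = @import("std");

    test "SegmentedList pointers survive growth" {
        const gpa = std.testing.allocator;
        var list: std.SegmentedList(u32, 0) = .{};
        defer list.deinit(gpa);

        (try list.addOne(gpa)).* = 42;
        const first = list.at(0);

        // Growth allocates additional shelves rather than reallocating,
        // so `first` keeps pointing at the original element.
        var i: u32 = 0;
        while (i < 1000) : (i += 1) {
            (try list.addOne(gpa)).* = i;
        }
        try std.testing.expectEqual(@as(u32, 42), first.*);
    }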
struct_type: StructType, - union_type: struct { - fields_len: u32, - // TODO move Module.Union data to InternPool - }, + union_type: UnionType, opaque_type: OpaqueType, simple_value: SimpleValue, @@ -87,6 +91,8 @@ pub const Key = union(enum) { /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, /// so the slice length will be one more than the type's array length. aggregate: Aggregate, + /// An instance of a union. + un: Union, pub const IntType = std.builtin.Type.Int; @@ -145,13 +151,27 @@ pub const Key = union(enum) { /// - index == .none /// * A struct which has fields as well as a namepace. pub const StructType = struct { - /// This will be `none` only in the case of `@TypeOf(.{})` - /// (`Index.empty_struct_type`). - namespace: Module.Namespace.OptionalIndex, /// The `none` tag is used to represent two cases: /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`. /// * A struct with no fields, in which case `namespace` will be populated. index: Module.Struct.OptionalIndex, + /// This will be `none` only in the case of `@TypeOf(.{})` + /// (`Index.empty_struct_type`). + namespace: Module.Namespace.OptionalIndex, + }; + + pub const UnionType = struct { + index: Module.Union.Index, + runtime_tag: RuntimeTag, + + pub const RuntimeTag = enum { none, safety, tagged }; + + pub fn hasTag(self: UnionType) bool { + return switch (self.runtime_tag) { + .none => false, + .tagged, .safety => true, + }; + } }; pub const Int = struct { @@ -198,6 +218,15 @@ pub const Key = union(enum) { val: Index, }; + pub const Union = struct { + /// This is the union type; not the field type. + ty: Index, + /// Indicates the active field. + tag: Index, + /// The value of the active field. + val: Index, + }; + pub const Aggregate = struct { ty: Index, fields: []const Index, @@ -229,12 +258,10 @@ pub const Key = union(enum) { .extern_func, .opt, .struct_type, + .union_type, + .un, => |info| std.hash.autoHash(hasher, info), - .union_type => |union_type| { - _ = union_type; - @panic("TODO"); - }, .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), .int => |int| { @@ -320,6 +347,14 @@ pub const Key = union(enum) { const b_info = b.struct_type; return std.meta.eql(a_info, b_info); }, + .union_type => |a_info| { + const b_info = b.union_type; + return std.meta.eql(a_info, b_info); + }, + .un => |a_info| { + const b_info = b.un; + return std.meta.eql(a_info, b_info); + }, .ptr => |a_info| { const b_info = b.ptr; @@ -371,14 +406,6 @@ pub const Key = union(enum) { @panic("TODO"); }, - .union_type => |a_info| { - const b_info = b.union_type; - - _ = a_info; - _ = b_info; - @panic("TODO"); - }, - .opaque_type => |a_info| { const b_info = b.opaque_type; return a_info.decl == b_info.decl; @@ -411,6 +438,7 @@ pub const Key = union(enum) { .extern_func, .enum_tag, .aggregate, + .un, => |x| return x.ty, .simple_value => |s| switch (s) { @@ -838,6 +866,15 @@ pub const Tag = enum(u8) { /// Module.Struct object allocated for it. /// data is Module.Namespace.Index. type_struct_ns, + /// A tagged union type. + /// `data` is `Module.Union.Index`. + type_union_tagged, + /// An untagged union type. It also has no safety tag. + /// `data` is `Module.Union.Index`. + type_union_untagged, + /// An untagged union type which has a safety tag. + /// `data` is `Module.Union.Index`. + type_union_safety, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. @@ -908,6 +945,8 @@ pub const Tag = enum(u8) { /// * A struct which has 0 fields. 
/// data is Index of the type, which is known to be zero bits at runtime. only_possible_value, + /// data is extra index to Key.Union. + union_value, }; /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to @@ -1141,6 +1180,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.structs_free_list.deinit(gpa); ip.allocated_structs.deinit(gpa); + ip.unions_free_list.deinit(gpa); + ip.allocated_unions.deinit(gpa); + ip.* = undefined; } @@ -1233,6 +1275,19 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(), } }, + .type_union_untagged => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .none, + } }, + .type_union_tagged => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .tagged, + } }, + .type_union_safety => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .safety, + } }, + .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, @@ -1303,6 +1358,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { else => unreachable, }; }, + .union_value => .{ .un = ip.extraData(Key.Union, data) }, }; } @@ -1350,7 +1406,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } - // TODO introduce more pointer encodings ip.items.appendAssumeCapacity(.{ .tag = .type_pointer, .data = try ip.addExtra(gpa, Pointer{ @@ -1450,8 +1505,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .union_type => |union_type| { - _ = union_type; - @panic("TODO"); + ip.items.appendAssumeCapacity(.{ + .tag = switch (union_type.runtime_tag) { + .none => .type_union_untagged, + .safety => .type_union_safety, + .tagged => .type_union_tagged, + }, + .data = @enumToInt(union_type.index), + }); }, .opaque_type => |opaque_type| { @@ -1642,6 +1703,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } @panic("TODO"); }, + + .un => |un| { + assert(un.ty != .none); + assert(un.tag != .none); + assert(un.val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .union_value, + .data = try ip.addExtra(gpa, un), + }); + }, } return @intToEnum(Index, ip.items.len - 1); } @@ -1923,6 +1994,17 @@ pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex { return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); } +pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex { + const tags = ip.items.items(.tag); + if (val == .none) return .none; + switch (tags[@enumToInt(val)]) { + .type_union_tagged, .type_union_untagged, .type_union_safety => {}, + else => return .none, + } + const datas = ip.items.items(.data); + return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); +} + pub fn isOptionalType(ip: InternPool, ty: Index) bool { const tags = ip.items.items(.tag); if (ty == .none) return false; @@ -1937,15 +2019,22 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { const items_size = (1 + 4) * ip.items.len; const extra_size = 4 * ip.extra.items.len; const limbs_size = 8 * ip.limbs.items.len; + const structs_size = ip.allocated_structs.len * + (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); + const unions_size = ip.allocated_unions.len * + (@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); // TODO: map overhead size is not taken into 
account - const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size; + const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + + structs_size + unions_size; std.debug.print( \\InternPool size: {d} bytes \\ {d} items: {d} bytes \\ {d} extra: {d} bytes \\ {d} limbs: {d} bytes + \\ {d} structs: {d} bytes + \\ {d} unions: {d} bytes \\ , .{ total_size, @@ -1955,6 +2044,10 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { extra_size, ip.limbs.items.len, limbs_size, + ip.allocated_structs.len, + structs_size, + ip.allocated_unions.len, + unions_size, }); const tags = ip.items.items(.tag); @@ -1980,8 +2073,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_error_union => @sizeOf(ErrorUnion), .type_enum_simple => @sizeOf(EnumSimple), .type_opaque => @sizeOf(Key.OpaqueType), - .type_struct => 0, - .type_struct_ns => 0, + .type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + .type_struct_ns => @sizeOf(Module.Namespace), + + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + .simple_type => 0, .simple_value => 0, .ptr_int => @sizeOf(PtrInt), @@ -2010,6 +2109,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .extern_func => @panic("TODO"), .func => @panic("TODO"), .only_possible_value => 0, + .union_value => @sizeOf(Key.Union), }); } const SortContext = struct { @@ -2041,6 +2141,10 @@ pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) return structPtrConst(ip, index.unwrap() orelse return null); } +pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { + return ip.allocated_unions.at(@enumToInt(index)); +} + pub fn createStruct( ip: *InternPool, gpa: Allocator, @@ -2059,3 +2163,22 @@ pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index // allocation failures here, instead leaking the Struct until garbage collection. }; } + +pub fn createUnion( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Union, +) Allocator.Error!Module.Union.Index { + if (ip.unions_free_list.popOrNull()) |index| return index; + const ptr = try ip.allocated_unions.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Union.Index, ip.allocated_unions.len - 1); +} + +pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) void { + ip.unionPtr(index).* = undefined; + ip.unions_free_list.append(gpa, index) catch { + // In order to keep `destroyUnion` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Union until garbage collection. + }; +} diff --git a/src/Module.zig b/src/Module.zig index ada69537f6d9..6478f7ce4f56 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -851,11 +851,10 @@ pub const Decl = struct { /// If the Decl has a value and it is a union, return it, /// otherwise null. 
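The `createUnion`/`destroyUnion` pair above recycles slots through a free list so indices stay dense and destruction never fails. The general shape, as a generic sketch (note the sketch re-initializes a recycled slot before returning it; callers of a pool like this must not assume a popped slot still holds fresh data):

    const std = @import("std");

    fn Pool(comptime T: type) type {
        return struct {
            const Self = @This();
            allocated: std.SegmentedList(T, 0) = .{},
            free_list: std.ArrayListUnmanaged(u32) = .{},

            fn create(self: *Self, gpa: std.mem.Allocator, init: T) !u32 {
                if (self.free_list.popOrNull()) |index| {
                    self.allocated.at(index).* = init;
                    return index;
                }
                (try self.allocated.addOne(gpa)).* = init;
                return @intCast(u32, self.allocated.len - 1);
            }

            fn destroy(self: *Self, gpa: std.mem.Allocator, index: u32) void {
                self.allocated.at(index).* = undefined;
                // Kept non-fallible by design: on OOM the slot simply
                // leaks until garbage collection, as the comment in
                // destroyUnion explains.
                self.free_list.append(gpa, index) catch {};
            }
        };
    }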
- pub fn getUnion(decl: *Decl) ?*Union { + pub fn getUnion(decl: *Decl, mod: *Module) ?*Union { if (!decl.owns_tv) return null; const ty = (decl.val.castTag(.ty) orelse return null).data; - const union_obj = (ty.cast(Type.Payload.Union) orelse return null).data; - return union_obj; + return mod.typeToUnion(ty); } /// If the Decl has a value and it is a function, return it, @@ -896,10 +895,6 @@ pub const Decl = struct { const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; return enum_obj.namespace.toOptional(); }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - return union_obj.namespace.toOptional(); - }, else => return .none, } @@ -907,6 +902,10 @@ pub const Decl = struct { else => return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), .struct_type => |struct_type| struct_type.namespace, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.namespace.toOptional(); + }, else => .none, }, } @@ -1373,6 +1372,28 @@ pub const Union = struct { requires_comptime: PropertyBoolean = .unknown, assumed_runtime_bits: bool = false, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + pub const Field = struct { /// undefined until `status` is `have_field_types` or `have_layout`. 
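The `Index`/`OptionalIndex` pair above is a space optimization: reserving `maxInt(u32)` as a `none` sentinel keeps an optional handle at 4 bytes, whereas a language-level `?Index` adds a tag byte plus padding. A sketch of the size difference:

    const std = @import("std");

    const Index = enum(u32) { _ };
    const OptionalIndex = enum(u32) { none = std.math.maxInt(u32), _ };

    comptime {
        std.debug.assert(@sizeOf(OptionalIndex) == 4);
        // Zig does not apply a niche optimization to optional enums.
        std.debug.assert(@sizeOf(?Index) == 8);
    }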
ty: Type, @@ -3639,6 +3660,10 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { return mod.allocated_namespaces.at(@enumToInt(index)); } +pub fn unionPtr(mod: *Module, index: Union.Index) *Union { + return mod.intern_pool.unionPtr(index); +} + pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { return mod.intern_pool.structPtr(index); } @@ -4112,7 +4137,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { }; } - if (decl.getUnion()) |union_obj| { + if (decl.getUnion(mod)) |union_obj| { union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; @@ -5988,20 +6013,10 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { decl.analysis = .outdated; } -pub const CreateNamespaceOptions = struct { - parent: Namespace.OptionalIndex, - file_scope: *File, - ty: Type, -}; - -pub fn createNamespace(mod: *Module, options: CreateNamespaceOptions) !Namespace.Index { +pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { if (mod.namespaces_free_list.popOrNull()) |index| return index; const ptr = try mod.allocated_namespaces.addOne(mod.gpa); - ptr.* = .{ - .parent = options.parent, - .file_scope = options.file_scope, - .ty = options.ty, - }; + ptr.* = initialization; return @intToEnum(Namespace.Index, mod.allocated_namespaces.len - 1); } @@ -6021,6 +6036,14 @@ pub fn destroyStruct(mod: *Module, index: Struct.Index) void { return mod.intern_pool.destroyStruct(mod.gpa, index); } +pub fn createUnion(mod: *Module, initialization: Union) Allocator.Error!Union.Index { + return mod.intern_pool.createUnion(mod.gpa, initialization); +} + +pub fn destroyUnion(mod: *Module, index: Union.Index) void { + return mod.intern_pool.destroyUnion(mod.gpa, index); +} + pub fn allocateNewDecl( mod: *Module, namespace: Namespace.Index, @@ -7068,6 +7091,15 @@ pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { return i.toValue(); } +pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value { + const i = try intern(mod, .{ .un = .{ + .ty = union_ty.ip_index, + .tag = tag.ip_index, + .val = val.ip_index, + } }); + return i.toValue(); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } @@ -7276,3 +7308,8 @@ pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null; return mod.structPtr(struct_index); } + +pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { + const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null; + return mod.unionPtr(union_index); +} diff --git a/src/Sema.zig b/src/Sema.zig index 1f72470f9eb3..76ac887c06bb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3123,6 +3123,8 @@ fn zirUnionDecl( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; + const gpa = sema.gpa; const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); var extra_index: usize = extended.operand; @@ -3142,49 +3144,57 @@ fn zirUnionDecl( break :blk decls_len; } else 0; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const union_obj = try new_decl_arena_allocator.create(Module.Union); - const type_tag = if (small.has_tag_type 
or small.auto_enum_tag)
-        Type.Tag.union_tagged
-    else if (small.layout != .Auto)
-        Type.Tag.@"union"
-    else switch (block.sema.mod.optimizeMode()) {
-        .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged,
-        .ReleaseFast, .ReleaseSmall => Type.Tag.@"union",
-    };
-    const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
-    union_payload.* = .{
-        .base = .{ .tag = type_tag },
-        .data = union_obj,
-    };
-    const union_ty = Type.initPayload(&union_payload.base);
-    const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
-    const mod = sema.mod;
+    // Because these three objects reference each other, `undefined`
+    // placeholders are used here and then replaced once the union type
+    // gains an InternPool index.
+    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
         .ty = Type.type,
-        .val = union_val,
+        .val = undefined,
     }, small.name_strategy, "union", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
     errdefer mod.abortAnonDecl(new_decl_index);
-    union_obj.* = .{
+
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const union_index = try mod.createUnion(.{
         .owner_decl = new_decl_index,
         .tag_ty = Type.null,
         .fields = .{},
         .zir_index = inst,
         .layout = small.layout,
         .status = .none,
-        .namespace = try mod.createNamespace(.{
-            .parent = block.namespace.toOptional(),
-            .ty = union_ty,
-            .file_scope = block.getFileScope(mod),
-        }),
-    };
+        .namespace = new_namespace_index,
+    });
+    errdefer mod.destroyUnion(union_index);
+
+    const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{
+        .index = union_index,
+        .runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
+            .tagged
+        else if (small.layout != .Auto)
+            .none
+        else switch (mod.optimizeMode()) {
+            .Debug, .ReleaseSafe => .safety,
+            .ReleaseFast, .ReleaseSmall => .none,
+        },
+    } });
+    errdefer mod.intern_pool.remove(union_ty);
+
+    new_decl.val = union_ty.toValue();
+    new_namespace.ty = union_ty.toType();

-    _ = try mod.scanNamespace(union_obj.namespace, extra_index, decls_len, new_decl);
+    _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
     try new_decl.finalizeNewArena(&new_decl_arena);

     return sema.analyzeDeclVal(block, src, new_decl_index);
@@ -4246,6 +4256,8 @@ fn validateUnionInit(
     instrs: []const Zir.Inst.Index,
     union_ptr: Air.Inst.Ref,
 ) CompileError!void {
+    const mod = sema.mod;
+
     if (instrs.len != 1) {
         const msg = msg: {
             const msg = try sema.errMsg(
@@ -4343,7 +4355,7 @@ fn validateUnionInit(
             break;
         }

-    const tag_ty = union_ty.unionTagTypeHypothetical();
+    const tag_ty = union_ty.unionTagTypeHypothetical(mod);
     const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
     const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
@@ -8273,7 +8285,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         .Enum => operand,
         .Union => blk: {
             const union_ty = try sema.resolveTypeFields(operand_ty);
-            const tag_ty = union_ty.unionTagType() orelse {
+            const tag_ty = union_ty.unionTagType(mod) orelse {
                 return sema.fail(
                     block,
                     operand_src,
@@ -10158,7 +10170,7 @@ fn zirSwitchCapture(
     const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable;
     if
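// The placeholder dance above generalizes; a self-contained toy model of it
// (hypothetical types, not compiler code): allocate the mutually referencing
// objects with `undefined` links, then patch them once the final index exists.
const std = @import("std");

const ToyNamespace = struct { ty: u32 };
const ToyUnion = struct { namespace: u32 };

test "placeholder then patch" {
    var namespaces = std.ArrayList(ToyNamespace).init(std.testing.allocator);
    defer namespaces.deinit();
    var unions = std.ArrayList(ToyUnion).init(std.testing.allocator);
    defer unions.deinit();

    // 1. Namespace first, with a placeholder for the type that owns it.
    try namespaces.append(.{ .ty = undefined });
    const ns_index = @intCast(u32, namespaces.items.len - 1);
    // 2. The union can reference the namespace immediately.
    try unions.append(.{ .namespace = ns_index });
    const union_index = @intCast(u32, unions.items.len - 1);
    // 3. Only now does the namespace learn its owning type's index.
    namespaces.items[ns_index].ty = union_index;

    try std.testing.expectEqual(union_index, namespaces.items[ns_index].ty);
}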
(operand_ty.zigTypeTag(mod) == .Union) { const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?); - const union_obj = operand_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(operand_ty).?; const field_ty = union_obj.fields.values()[field_index].ty; if (try sema.resolveDefinedValue(block, sema.src, operand_ptr)) |union_val| { if (is_ref) { @@ -10229,7 +10241,7 @@ fn zirSwitchCapture( switch (operand_ty.zigTypeTag(mod)) { .Union => { - const union_obj = operand_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(operand_ty).?; const first_item = try sema.resolveInst(items[0]); // Previous switch validation ensured this will succeed const first_item_val = sema.resolveConstValue(block, .unneeded, first_item, "") catch unreachable; @@ -10403,7 +10415,7 @@ fn zirSwitchCond( .Union => { const union_ty = try sema.resolveTypeFields(operand_ty); - const enum_ty = union_ty.unionTagType() orelse { + const enum_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "switch on union with no attached enum", .{}); errdefer msg.destroy(sema.gpa); @@ -11627,7 +11639,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally and !special.is_inline) for (seen_enum_fields, 0..) |seen_field, index| { if (seen_field != null) continue; - const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(maybe_union_ty).?; const field_ty = union_obj.fields.values()[index].ty; if (field_ty.zigTypeTag(mod) != .NoReturn) break true; } else false @@ -12068,7 +12080,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } break :hf switch (ty.zigTypeTag(mod)) { .Struct => ty.structFields(mod).contains(field_name), - .Union => ty.unionFields().contains(field_name), + .Union => ty.unionFields(mod).contains(field_name), .Enum => ty.enumFields().contains(field_name), .Array => mem.eql(u8, field_name, "len"), else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ @@ -15415,7 +15427,7 @@ fn analyzeCmpUnionTag( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const union_ty = try sema.resolveTypeFields(sema.typeOf(un)); - const union_tag_ty = union_ty.unionTagType() orelse { + const union_tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); errdefer msg.destroy(sema.gpa); @@ -16403,7 +16415,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.resolveTypeLayout(ty); // Getting alignment requires type layout const layout = union_ty.containerLayout(mod); - const union_fields = union_ty.unionFields(); + const union_fields = union_ty.unionFields(mod); const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count()); for (union_field_vals, 0..) 
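// The mechanical rewrite repeated throughout these hunks, shown once as a
// before/after sketch (`ty` is any union type):
//     before: const union_obj = ty.cast(Type.Payload.Union).?.data;
//     after:  const union_obj = mod.typeToUnion(ty).?;
// Passing `mod` is what grants access to mod.intern_pool, which now owns the
// `Module.Union` data that used to hang off the type's payload pointer.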
|*field_val, i| { @@ -16458,7 +16470,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace(mod)); - const enum_tag_ty_val = if (union_ty.unionTagType()) |tag_ty| v: { + const enum_tag_ty_val = if (union_ty.unionTagType(mod)) |tag_ty| v: { const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty); break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); } else Value.null; @@ -17877,12 +17889,13 @@ fn unionInit( field_name: []const u8, field_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src); - const field = union_ty.unionFields().values()[field_index]; + const field = union_ty.unionFields(mod).values()[field_index]; const init = try sema.coerce(block, field.ty, uncasted_init, init_src); if (try sema.resolveMaybeUndefVal(init)) |init_val| { - const tag_ty = union_ty.unionTagTypeHypothetical(); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ @@ -17983,7 +17996,7 @@ fn zirStructInit( const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); - const tag_ty = resolved_ty.unionTagTypeHypothetical(); + const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); @@ -18006,7 +18019,7 @@ fn zirStructInit( const alloc = try block.addTy(.alloc, alloc_ty); const field_ptr = try sema.unionFieldPtr(block, field_src, alloc, field_name, field_src, resolved_ty, true); try sema.storePtr(block, src, field_ptr, init_inst); - const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(), tag_val); + const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(mod), tag_val); _ = try block.addBinOp(.set_union_tag, alloc, new_tag); return sema.makePtrConst(block, alloc); } @@ -18544,7 +18557,7 @@ fn fieldType( return sema.addType(field.ty); }, .Union => { - const union_obj = cur_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(cur_ty).?; const field = union_obj.fields.get(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); return sema.addType(field.ty); @@ -18726,7 +18739,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.addStrLit(block, bytes); }, .Enum => operand_ty, - .Union => operand_ty.unionTagType() orelse { + .Union => operand_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "union '{}' is untagged", .{ operand_ty.fmt(sema.mod), @@ -19245,42 +19258,53 @@ fn zirReify( errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); - const union_obj = try new_decl_arena_allocator.create(Module.Union); - const type_tag = if (!tag_type_val.isNull(mod)) - Type.Tag.union_tagged - else if (layout != .Auto) - Type.Tag.@"union" - else switch (mod.optimizeMode()) 
{
-        .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged,
-        .ReleaseFast, .ReleaseSmall => Type.Tag.@"union",
-    };
-    const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
-    union_payload.* = .{
-        .base = .{ .tag = type_tag },
-        .data = union_obj,
-    };
-    const union_ty = Type.initPayload(&union_payload.base);
-    const new_union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
+    // Because these three objects reference each other, `undefined`
+    // placeholders are used here and then replaced once the union type
+    // gains an InternPool index.
+    const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
         .ty = Type.type,
-        .val = new_union_val,
+        .val = undefined,
     }, name_strategy, "union", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
     errdefer mod.abortAnonDecl(new_decl_index);
-    union_obj.* = .{
+
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const union_index = try mod.createUnion(.{
         .owner_decl = new_decl_index,
         .tag_ty = Type.null,
         .fields = .{},
         .zir_index = inst,
         .layout = layout,
         .status = .have_field_types,
-        .namespace = try mod.createNamespace(.{
-            .parent = block.namespace.toOptional(),
-            .ty = union_ty,
-            .file_scope = block.getFileScope(mod),
-        }),
-    };
+        .namespace = new_namespace_index,
+    });
+    const union_obj = mod.unionPtr(union_index);
+    errdefer mod.destroyUnion(union_index);
+
+    const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{
+        .index = union_index,
+        .runtime_tag = if (!tag_type_val.isNull(mod))
+            .tagged
+        else if (layout != .Auto)
+            .none
+        else switch (mod.optimizeMode()) {
+            .Debug, .ReleaseSafe => .safety,
+            .ReleaseFast, .ReleaseSmall => .none,
+        },
+    } });
+    errdefer mod.intern_pool.remove(union_ty);
+
+    new_decl.val = union_ty.toValue();
+    new_namespace.ty = union_ty.toType();

     // Tag type
     var tag_ty_field_names: ?Module.EnumFull.NameMap = null;
@@ -21981,8 +22005,8 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     ptr_ty_data.@"align" = blk: {
         if (mod.typeToStruct(parent_ty)) |struct_obj| {
             break :blk struct_obj.fields.values()[field_index].abi_align;
-        } else if (parent_ty.cast(Type.Payload.Union)) |union_obj| {
-            break :blk union_obj.data.fields.values()[field_index].abi_align;
+        } else if (mod.typeToUnion(parent_ty)) |union_obj| {
+            break :blk union_obj.fields.values()[field_index].abi_align;
         } else {
             break :blk 0;
         }
@@ -23443,8 +23467,7 @@ fn explainWhyTypeIsComptimeInner(
         .Union => {
             if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;

-            if (ty.cast(Type.Payload.Union)) |payload| {
-                const union_obj = payload.data;
+            if (mod.typeToUnion(ty)) |union_obj| {
                 for (union_obj.fields.values(), 0..)
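// The tag policy shared by zirUnionDecl and zirReify above, factored into a
// standalone sketch (hypothetical helper; the enum mirrors the `runtime_tag`
// field of the InternPool `union_type` key):
const RuntimeTag = enum { none, safety, tagged };

fn unionRuntimeTag(
    has_explicit_or_auto_tag: bool,
    layout: std.builtin.Type.ContainerLayout,
    mode: std.builtin.OptimizeMode,
) RuntimeTag {
    if (has_explicit_or_auto_tag) return .tagged;
    // Extern and packed unions never carry a hidden tag.
    if (layout != .Auto) return .none;
    // Auto-layout untagged unions get a safety tag in safe builds only.
    return switch (mode) {
        .Debug, .ReleaseSafe => .safety,
        .ReleaseFast, .ReleaseSmall => .none,
    };
}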
|field, i| { const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{ .index = i, @@ -24144,7 +24167,7 @@ fn fieldVal( } } const union_ty = try sema.resolveTypeFields(child_type); - if (union_ty.unionTagType()) |enum_ty| { + if (union_ty.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| { const field_index = @intCast(u32, field_index_usize); return sema.addConstant( @@ -24358,7 +24381,7 @@ fn fieldPtr( } } const union_ty = try sema.resolveTypeFields(child_type); - if (union_ty.unionTagType()) |enum_ty| { + if (union_ty.unionTagType(mod)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name)) |field_index| { const field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); @@ -24489,7 +24512,7 @@ fn fieldCallBind( }, .Union => { const union_ty = try sema.resolveTypeFields(concrete_ty); - const fields = union_ty.unionFields(); + const fields = union_ty.unionFields(mod); const field_index_usize = fields.getIndex(field_name) orelse break :find_field; const field_index = @intCast(u32, field_index_usize); const field = fields.values()[field_index]; @@ -24964,7 +24987,7 @@ fn unionFieldPtr( const union_ptr_ty = sema.typeOf(union_ptr); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; const ptr_field_ty = try Type.ptr(arena, mod, .{ @@ -25028,7 +25051,7 @@ fn unionFieldPtr( try sema.requireRuntimeBlock(block, src, null); if (!initializing and union_obj.layout == .Auto and block.wantSafety() and - union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1) + union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); @@ -25057,7 +25080,7 @@ fn unionFieldVal( assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); @@ -25103,7 +25126,7 @@ fn unionFieldVal( try sema.requireRuntimeBlock(block, src, null); if (union_obj.layout == .Auto and block.wantSafety() and - union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1) + union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); @@ -26189,7 +26212,7 @@ fn coerceExtra( }, .Union => blk: { // union to its own tag type - const union_tag_ty = inst_ty.unionTagType() orelse break :blk; + const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk; if (union_tag_ty.eql(dest_ty, sema.mod)) { return sema.unionToTag(block, dest_ty, inst, inst_src); } @@ -28622,7 +28645,7 @@ fn coerceEnumToUnion( const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const tag_ty = union_ty.unionTagType() orelse { + const tag_ty = 
union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), @@ -28649,7 +28672,7 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(msg); }; - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field = union_obj.fields.values()[field_index]; const field_ty = try sema.resolveTypeFields(field.ty); if (field_ty.zigTypeTag(mod) == .NoReturn) { @@ -28679,10 +28702,7 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(msg); }; - return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = val, - .val = opv, - })); + return sema.addConstant(union_ty, try mod.unionValue(union_ty, val, opv)); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -28699,7 +28719,7 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(msg); } - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; { var msg: ?*Module.ErrorMsg = null; errdefer if (msg) |some| some.destroy(sema.gpa); @@ -29350,10 +29370,13 @@ fn analyzeRef( const operand_ty = sema.typeOf(operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - switch (val.tag()) { - .extern_fn, .function => { - const decl_index = val.pointerDecl().?; - return sema.analyzeDeclRef(decl_index); + switch (val.ip_index) { + .none => switch (val.tag()) { + .extern_fn, .function => { + const decl_index = val.pointerDecl().?; + return sema.analyzeDeclRef(decl_index); + }, + else => {}, }, else => {}, } @@ -31523,8 +31546,9 @@ fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void } fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(resolved_ty).?; switch (union_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { @@ -31617,27 +31641,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return false; }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var requires_comptime = false; - union_obj.requires_comptime = .wip; - for (union_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - union_obj.requires_comptime = .yes; - } else { - union_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } - }, - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -31734,10 +31737,31 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + union_obj.requires_comptime = .wip; + for (union_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + union_obj.requires_comptime = .yes; + } else { + 
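// The `switch (val.ip_index)` introduced above is the transitional idiom of
// this patch: a value whose `ip_index` is `.none` still carries a legacy
// payload tag, while everything else is resolved through the InternPool. A
// sketch of the shape (hypothetical helper, not part of the patch):
fn isFunctionLikeValue(mod: *Module, val: Value) bool {
    return switch (val.ip_index) {
        // Legacy representation: consult the old value tag.
        .none => switch (val.tag()) {
            .extern_fn, .function => true,
            else => false,
        },
        // Interned representation: consult the pool key instead.
        else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
            .extern_func => true,
            else => false,
        },
    };
}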
union_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, + .opaque_type => false, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -31829,8 +31853,9 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveUnionLayout(ty); + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(resolved_ty).?; switch (union_obj.status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, @@ -31858,15 +31883,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { const mod = sema.mod; switch (ty.ip_index) { - .none => switch (ty.tag()) { - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - try sema.resolveTypeFieldsUnion(ty, union_obj); - return ty; - }, - - else => return ty, - }, + // TODO: After the InternPool transition is complete, change this to `unreachable`. + .none => return ty, .u1_type, .u8_type, @@ -31957,7 +31975,12 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { try sema.resolveTypeFieldsStruct(ty, struct_obj); return ty; }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + try sema.resolveTypeFieldsUnion(ty, union_obj); + return ty; + }, + else => return ty, }, } @@ -33123,32 +33146,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .@"union", .union_safety_tagged, .union_tagged => { - const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; - const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse - return null; - const fields = union_obj.fields.values(); - if (fields.len == 0) return Value.@"unreachable"; - const only_field = fields[0]; - if (only_field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - union_obj.srcLoc(sema.mod), - "union '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse - return null; - // TODO make this not allocate. 
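// A self-contained model (toy types) of the four-state `requires_comptime`
// memo used in the union arm above; `.wip` is what terminates the recursion
// when a union's fields lead back to the union itself:
const Memo = enum { unknown, wip, no, yes };

const ToyType = struct {
    memo: Memo = .unknown,
    comptime_only: bool = false,
    fields: []const *ToyType = &.{},

    fn requiresComptime(ty: *ToyType) bool {
        switch (ty.memo) {
            // Treat an in-progress query as "no": the cycle adds nothing new.
            .no, .wip => return false,
            .yes => return true,
            .unknown => {
                ty.memo = .wip;
                var result = ty.comptime_only;
                for (ty.fields) |field| {
                    if (field.requiresComptime()) result = true;
                }
                ty.memo = if (result) .yes else .no;
                return result;
            },
        }
    }
};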
- return try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = val_val, - }); - }, .array => { if (ty.arrayLen(mod) == 0) @@ -33268,10 +33265,37 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return empty.toValue(); }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const resolved_ty = try sema.resolveTypeFields(ty); + const union_obj = mod.unionPtr(union_type.index); + const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse + return null; + const fields = union_obj.fields.values(); + if (fields.len == 0) return Value.@"unreachable"; + const only_field = fields[0]; + if (only_field.ty.eql(resolved_ty, sema.mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + union_obj.srcLoc(sema.mod), + "union '{}' depends on itself", + .{ty.fmt(sema.mod)}, + ); + try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(msg); + } + const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse + return null; + const only = try mod.intern(.{ .un = .{ + .ty = resolved_ty.ip_index, + .tag = tag_val.ip_index, + .val = val_val.ip_index, + } }); + return only.toValue(); + }, .opaque_type => null, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -33710,30 +33734,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return false; }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - if (union_obj.status == .field_types_wip) - return false; - - try sema.resolveTypeFieldsUnion(ty, union_obj); - - union_obj.requires_comptime = .wip; - for (union_obj.fields.values()) |field| { - if (try sema.typeRequiresComptime(field.ty)) { - union_obj.requires_comptime = .yes; - return true; - } - } - union_obj.requires_comptime = .no; - return false; - }, - } - }, - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -33837,10 +33837,34 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (union_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsUnion(ty, union_obj); + + union_obj.requires_comptime = .wip; + for (union_obj.fields.values()) |field| { + if (try sema.typeRequiresComptime(field.ty)) { + union_obj.requires_comptime = .yes; + return true; + } + } + union_obj.requires_comptime = .no; + return false; + }, + } + }, + .opaque_type => false, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -33905,8 +33929,9 @@ fn unionFieldIndex( field_name: []const u8, field_src: LazySrcLoc, ) !u32 { + const mod = sema.mod; const union_ty = try sema.resolveTypeFields(unresolved_union_ty); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_index_usize = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); return 
@intCast(u32, field_index_usize); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 2105d3108f3c..cf9888f35703 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -91,7 +91,7 @@ pub fn print( try writer.writeAll(".{ "); try print(.{ - .ty = ty.cast(Type.Payload.Union).?.data.tag_ty, + .ty = mod.unionPtr(mod.intern_pool.indexToKey(ty.ip_index).union_type.index).tag_ty, .val = union_val.tag, }, writer, level - 1, mod); try writer.writeAll(" = "); @@ -185,7 +185,7 @@ pub fn print( }, } } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { - const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; + const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index]; return writer.print(".{s}", .{field_name}); } else if (field_ptr.container_ty.isSlice(mod)) { switch (field_ptr.field_index) { diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index 6589425fc229..72a617289544 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -79,7 +79,7 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 { const invalid = std.math.maxInt(u8); switch (ty.zigTypeTag(mod)) { .Union => { - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); var max_count: u8 = 0; for (fields.values()) |field| { const field_count = countFloats(field.ty, mod, maybe_float_bits); @@ -118,7 +118,7 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 { pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type { switch (ty.zigTypeTag(mod)) { .Union => { - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); for (fields.values()) |field| { if (getFloatArrayType(field.ty, mod)) |some| return some; } diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 7a7d632837df..e4a07f22bff3 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -62,7 +62,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; - for (ty.unionFields().values()) |field| { + for (ty.unionFields(mod).values()) |field| { if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) { return Class.arrSize(bit_size, 64); } @@ -121,7 +121,7 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 { const invalid = std.math.maxInt(u32); switch (ty.zigTypeTag(mod)) { .Union => { - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); var max_count: u32 = 0; for (fields.values()) |field| { const field_count = countFloats(field.ty, mod, maybe_float_bits); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 90c26d5d84be..c1409e49776c 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1739,8 +1739,8 @@ fn isByRef(ty: Type, mod: *Module) bool { .Frame, => return ty.hasRuntimeBitsIgnoreComptime(mod), .Union => { - if (ty.castTag(.@"union")) |union_ty| { - if (union_ty.data.layout == .Packed) { + if (mod.typeToUnion(ty)) |union_obj| { + if (union_obj.layout == .Packed) { return ty.abiSize(mod) > 8; } } @@ -3175,7 +3175,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .Union => { // in this case we have a packed union which will not be passed by reference. 
- const union_ty = ty.cast(Type.Payload.Union).?.data; + const union_ty = mod.typeToUnion(ty).?; const union_obj = val.castTag(.@"union").?.data; const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?; const field_ty = union_ty.fields.values()[field_index].ty; @@ -5086,12 +5086,12 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = result: { const union_ty = func.typeOfIndex(inst); const layout = union_ty.unionGetLayout(mod); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field = union_obj.fields.values()[extra.field_index]; const field_name = union_obj.fields.keys()[extra.field_index]; const tag_int = blk: { - const tag_ty = union_ty.unionTagTypeHypothetical(); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = tag_ty.enumFieldIndex(field_name).?; var tag_val_payload: Value.Payload.U32 = .{ .base = .{ .tag = .enum_field_index }, diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index ee836bebdb48..92b0f4dc407c 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -70,8 +70,8 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { } const layout = ty.unionGetLayout(mod); std.debug.assert(layout.tag_size == 0); - if (ty.unionFields().count() > 1) return memory; - return classifyType(ty.unionFields().values()[0].ty, mod); + if (ty.unionFields(mod).count() > 1) return memory; + return classifyType(ty.unionFields(mod).values()[0].ty, mod); }, .ErrorUnion, .Frame, @@ -111,11 +111,11 @@ pub fn scalarType(ty: Type, mod: *Module) Type { if (ty.containerLayout(mod) != .Packed) { const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0 and layout.tag_size != 0) { - return scalarType(ty.unionTagTypeSafety().?, mod); + return scalarType(ty.unionTagTypeSafety(mod).?, mod); } - std.debug.assert(ty.unionFields().count() == 1); + std.debug.assert(ty.unionFields(mod).count() == 1); } - return scalarType(ty.unionFields().values()[0].ty, mod); + return scalarType(ty.unionFields(mod).values()[0].ty, mod); }, else => return ty, } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 77661b2a14ca..7b93ff205974 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -11410,9 +11410,9 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv = try self.allocRegOrMem(inst, false); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_name = union_obj.fields.keys()[extra.field_index]; - const tag_ty = union_ty.unionTagTypeSafety().?; + const tag_ty = union_obj.tag_ty; const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; const tag_val = Value.initPayload(&tag_pl.base); diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index 45ce64a98e7b..69df5dbf4c7a 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -338,7 +338,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { if (ty_size > 64) return memory_class; - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); for (fields.values()) |field| { if (field.abi_align != 0) { if (field.abi_align < field.ty.abiAlignment(mod)) { diff --git a/src/codegen.zig b/src/codegen.zig index b29af1ff931a..5c022392bfa2 100644 --- a/src/codegen.zig +++ 
b/src/codegen.zig @@ -568,7 +568,7 @@ pub fn generateSymbol( if (layout.payload_size == 0) { return generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType().?, + .ty = typed_value.ty.unionTagType(mod).?, .val = union_obj.tag, }, code, debug_output, reloc_info); } @@ -576,7 +576,7 @@ pub fn generateSymbol( // Check if we should store the tag first. if (layout.tag_align >= layout.payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType().?, + .ty = typed_value.ty.unionTagType(mod).?, .val = union_obj.tag, }, code, debug_output, reloc_info)) { .ok => {}, @@ -584,7 +584,7 @@ pub fn generateSymbol( } } - const union_ty = typed_value.ty.cast(Type.Payload.Union).?.data; + const union_ty = mod.typeToUnion(typed_value.ty).?; const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; assert(union_ty.haveFieldTypes()); const field_ty = union_ty.fields.values()[field_index].ty; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1c1621650409..872bdb94d385 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -853,7 +853,7 @@ pub const DeclGen = struct { } try writer.writeByte('{'); - if (ty.unionTagTypeSafety()) |tag_ty| { + if (ty.unionTagTypeSafety(mod)) |tag_ty| { const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); @@ -863,12 +863,12 @@ pub const DeclGen = struct { if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } - for (ty.unionFields().values()) |field| { + for (ty.unionFields(mod).values()) |field| { if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, val, initializer_type); break; } - if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); + if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}'); return writer.writeByte('}'); }, .ErrorUnion => { @@ -1451,8 +1451,8 @@ pub const DeclGen = struct { } const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?; - const field_ty = ty.unionFields().values()[field_i].ty; - const field_name = ty.unionFields().keys()[field_i]; + const field_ty = ty.unionFields(mod).values()[field_i].ty; + const field_name = ty.unionFields(mod).keys()[field_i]; if (ty.containerLayout(mod) == .Packed) { if (field_ty.hasRuntimeBits(mod)) { if (field_ty.isPtrAtRuntime(mod)) { @@ -1472,7 +1472,7 @@ pub const DeclGen = struct { } try writer.writeByte('{'); - if (ty.unionTagTypeSafety()) |tag_ty| { + if (ty.unionTagTypeSafety(mod)) |tag_ty| { const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); @@ -1486,12 +1486,12 @@ pub const DeclGen = struct { try writer.print(" .{ } = ", .{fmtIdent(field_name)}); try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); try writer.writeByte(' '); - } else for (ty.unionFields().values()) |field| { + } else for (ty.unionFields(mod).values()) |field| { if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, Value.undef, initializer_type); break; } - if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); + if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}'); try writer.writeByte('}'); }, @@ -5238,13 +5238,13 @@ fn fieldLocation( .Auto, .Extern => { const field_ty = container_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) - return if (container_ty.unionTagTypeSafety() != null and + return if (container_ty.unionTagTypeSafety(mod) != null and !container_ty.unionHasAllZeroBitFieldTypes(mod)) 
.{ .field = .{ .identifier = "payload" } } else .begin; - const field_name = container_ty.unionFields().keys()[field_index]; - return .{ .field = if (container_ty.unionTagTypeSafety()) |_| + const field_name = container_ty.unionFields(mod).keys()[field_index]; + return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_| .{ .payload_identifier = field_name } else .{ .identifier = field_name } }; @@ -5424,37 +5424,6 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { else .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, - .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout(mod) == .Packed) { - const operand_lval = if (struct_byval == .constant) blk: { - const operand_local = try f.allocLocal(inst, struct_ty); - try f.writeCValue(writer, operand_local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, struct_byval, .Initializer); - try writer.writeAll(";\n"); - break :blk operand_local; - } else struct_byval; - - const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy(&"); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(", &"); - try f.writeCValue(writer, operand_lval, .Other); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("));\n"); - - if (struct_byval == .constant) { - try freeLocal(f, inst, operand_lval.new_local, 0); - } - - return local; - } else field_name: { - const name = struct_ty.unionFields().keys()[extra.field_index]; - break :field_name if (struct_ty.unionTagTypeSafety()) |_| - .{ .payload_identifier = name } - else - .{ .identifier = name }; - }, else => unreachable, }, else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { @@ -5520,6 +5489,41 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { return local; }, }, + .union_type => |union_type| field_name: { + const union_obj = mod.unionPtr(union_type.index); + if (union_obj.layout == .Packed) { + const operand_lval = if (struct_byval == .constant) blk: { + const operand_local = try f.allocLocal(inst, struct_ty); + try f.writeCValue(writer, operand_local, .Other); + try writer.writeAll(" = "); + try f.writeCValue(writer, struct_byval, .Initializer); + try writer.writeAll(";\n"); + break :blk operand_local; + } else struct_byval; + + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("memcpy(&"); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(", &"); + try f.writeCValue(writer, operand_lval, .Other); + try writer.writeAll(", sizeof("); + try f.renderType(writer, inst_ty); + try writer.writeAll("));\n"); + + if (struct_byval == .constant) { + try freeLocal(f, inst, operand_lval.new_local, 0); + } + + return local; + } else { + const name = union_obj.fields.keys()[extra.field_index]; + break :field_name if (union_type.hasTag()) .{ + .payload_identifier = name, + } else .{ + .identifier = name, + }; + } + }, else => unreachable, }, }; @@ -6461,7 +6465,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const union_ty = f.typeOf(bin_op.lhs).childType(mod); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) return .none; - const tag_ty = union_ty.unionTagTypeSafety().?; + const tag_ty = union_ty.unionTagTypeSafety(mod).?; const writer = f.object.writer(); const a = try Assignment.start(f, writer, tag_ty); @@ -6907,7 +6911,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; const 
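// Shape of the C value emitted for a safety-tagged union by the code above
// (illustrative only; the `tag`/`payload` identifiers are the ones this file
// already uses):
//     { .tag = <enum value>, .payload = { .<field> = <value> } }
// which is why field access on a tagged union goes through
// `.payload_identifier` ("payload.<name>") while an untagged union uses the
// bare `.identifier`.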
union_ty = f.typeOfIndex(inst); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_name = union_obj.fields.keys()[extra.field_index]; const payload_ty = f.typeOf(extra.init); const payload = try f.resolveInst(extra.init); @@ -6923,7 +6927,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { return local; } - const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: { + const field: CValue = if (union_ty.unionTagTypeSafety(mod)) |tag_ty| field: { const layout = union_ty.unionGetLayout(mod); if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name).?; diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 3321df6d4901..bcb4b92228d7 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -303,7 +303,7 @@ pub const CType = extern union { ); } pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs { - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const union_payload_align = union_obj.abiAlignment(mod, false); return init(union_payload_align, union_payload_align); } @@ -1498,7 +1498,7 @@ pub const CType = extern union { if (lookup.isMutable()) { for (0..switch (zig_ty_tag) { .Struct => ty.structFieldCount(mod), - .Union => ty.unionFields().count(), + .Union => ty.unionFields(mod).count(), else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i, mod); @@ -1531,7 +1531,7 @@ pub const CType = extern union { .payload => unreachable, }); } else { - const tag_ty = ty.unionTagTypeSafety(); + const tag_ty = ty.unionTagTypeSafety(mod); const is_tagged_union_wrapper = kind != .payload and tag_ty != null; const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper; switch (kind) { @@ -1580,7 +1580,7 @@ pub const CType = extern union { var is_packed = false; for (0..switch (zig_ty_tag) { .Struct => ty.structFieldCount(mod), - .Union => ty.unionFields().count(), + .Union => ty.unionFields(mod).count(), else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i, mod); @@ -1930,7 +1930,7 @@ pub const CType = extern union { const zig_ty_tag = ty.zigTypeTag(mod); const fields_len = switch (zig_ty_tag) { .Struct => ty.structFieldCount(mod), - .Union => ty.unionFields().count(), + .Union => ty.unionFields(mod).count(), else => unreachable, }; @@ -1956,7 +1956,7 @@ pub const CType = extern union { else arena.dupeZ(u8, switch (zig_ty_tag) { .Struct => ty.structFieldName(field_i, mod), - .Union => ty.unionFields().keys()[field_i], + .Union => ty.unionFields(mod).keys()[field_i], else => unreachable, }), .type = store.set.typeToIndex(field_ty, mod, switch (kind) { @@ -1986,7 +1986,7 @@ pub const CType = extern union { unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{ .fields = fields_pl, .owner_decl = ty.getOwnerDecl(mod), - .id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable, + .id = if (ty.unionTagTypeSafety(mod)) |_| 0 else unreachable, } }; return initPayload(unnamed_pl); }, @@ -2085,7 +2085,7 @@ pub const CType = extern union { var c_field_i: usize = 0; for (0..switch (zig_ty_tag) { .Struct => ty.structFieldCount(mod), - .Union => ty.unionFields().count(), + .Union => ty.unionFields(mod).count(), else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i, mod); @@ -2106,7 +2106,7 @@ pub const CType = extern union { std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { .Struct => 
ty.structFieldName(field_i, mod), - .Union => ty.unionFields().keys()[field_i], + .Union => ty.unionFields(mod).keys()[field_i], else => unreachable, }, mem.span(c_field.name), @@ -2122,7 +2122,7 @@ pub const CType = extern union { .packed_unnamed_union, => switch (self.kind) { .forward, .forward_parameter, .complete, .parameter, .global => unreachable, - .payload => if (ty.unionTagTypeSafety()) |_| { + .payload => if (ty.unionTagTypeSafety(mod)) |_| { const data = cty.cast(Payload.Unnamed).?.data; return ty.getOwnerDecl(mod) == data.owner_decl and data.id == 0; } else unreachable, @@ -2211,7 +2211,7 @@ pub const CType = extern union { const zig_ty_tag = ty.zigTypeTag(mod); for (0..switch (ty.zigTypeTag(mod)) { .Struct => ty.structFieldCount(mod), - .Union => ty.unionFields().count(), + .Union => ty.unionFields(mod).count(), else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i, mod); @@ -2228,7 +2228,7 @@ pub const CType = extern union { std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { .Struct => ty.structFieldName(field_i, mod), - .Union => ty.unionFields().keys()[field_i], + .Union => ty.unionFields(mod).keys()[field_i], else => unreachable, }); autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align"); @@ -2241,7 +2241,7 @@ pub const CType = extern union { .packed_unnamed_union, => switch (self.kind) { .forward, .forward_parameter, .complete, .parameter, .global => unreachable, - .payload => if (ty.unionTagTypeSafety()) |_| { + .payload => if (ty.unionTagTypeSafety(mod)) |_| { autoHash(hasher, ty.getOwnerDecl(mod)); autoHash(hasher, @as(u32, 0)); } else unreachable, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 6b12c447dcdf..c2992534424f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2178,7 +2178,7 @@ pub const Object = struct { break :blk fwd_decl; }; - const union_obj = ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(ty).?; if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime(mod)) { const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, union_di_ty); @@ -3063,7 +3063,7 @@ pub const DeclGen = struct { gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); const layout = t.unionGetLayout(mod); - const union_obj = t.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(t).?; if (union_obj.layout == .Packed) { const bitsize = @intCast(c_uint, t.bitSize(mod)); @@ -3797,11 +3797,11 @@ pub const DeclGen = struct { if (layout.payload_size == 0) { return lowerValue(dg, .{ - .ty = tv.ty.unionTagTypeSafety().?, + .ty = tv.ty.unionTagTypeSafety(mod).?, .val = tag_and_val.tag, }); } - const union_obj = tv.ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(tv.ty).?; const field_index = tv.ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?; assert(union_obj.haveFieldTypes()); @@ -3851,7 +3851,7 @@ pub const DeclGen = struct { } } const llvm_tag_value = try lowerValue(dg, .{ - .ty = tv.ty.unionTagTypeSafety().?, + .ty = tv.ty.unionTagTypeSafety(mod).?, .val = tag_and_val.tag, }); var fields: [3]*llvm.Value = undefined; @@ -9410,7 +9410,7 @@ pub const FuncGen = struct { const union_ty = self.typeOfIndex(inst); const union_llvm_ty = try self.dg.lowerType(union_ty); const layout = union_ty.unionGetLayout(mod); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; if (union_obj.layout == .Packed) { const 
big_bits = union_ty.bitSize(mod); @@ -9427,7 +9427,7 @@ pub const FuncGen = struct { } const tag_int = blk: { - const tag_ty = union_ty.unionTagTypeHypothetical(); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); const union_field_name = union_obj.fields.keys()[extra.field_index]; const enum_field_index = tag_ty.enumFieldIndex(union_field_name).?; var tag_val_payload: Value.Payload.U32 = .{ diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 41b523b8f468..1176eb746d93 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -755,10 +755,10 @@ pub const DeclGen = struct { const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { - return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); + return try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); } - const union_ty = ty.cast(Type.Payload.Union).?.data; + const union_ty = mod.typeToUnion(ty).?; if (union_ty.layout == .Packed) { return dg.todo("packed union constants", .{}); } @@ -770,7 +770,7 @@ pub const DeclGen = struct { const tag_first = layout.tag_align >= layout.payload_align; if (has_tag and tag_first) { - try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); } const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { @@ -782,7 +782,7 @@ pub const DeclGen = struct { try self.addUndef(payload_padding_len); if (has_tag and !tag_first) { - try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); } try self.addUndef(layout.padding); @@ -1121,7 +1121,7 @@ pub const DeclGen = struct { fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef { const mod = self.module; const layout = ty.unionGetLayout(mod); - const union_ty = ty.cast(Type.Payload.Union).?.data; + const union_ty = mod.typeToUnion(ty).?; if (union_ty.layout == .Packed) { return self.todo("packed union types", .{}); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 7d033de58472..d1e8d9601bd3 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -432,7 +432,7 @@ pub const DeclState = struct { }, .Union => { const layout = ty.unionGetLayout(mod); - const union_obj = ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(ty).?; const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0; const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size; const is_tagged = layout.tag_size > 0; @@ -476,7 +476,7 @@ pub const DeclState = struct { try dbg_info_buffer.writer().print("{s}\x00", .{union_name}); } - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); for (fields.keys()) |field_name| { const field = fields.get(field_name).?; if (!field.ty.hasRuntimeBits(mod)) continue; diff --git a/src/type.zig b/src/type.zig index 4e374a39d5bc..0096a96aa22a 100644 --- a/src/type.zig +++ b/src/type.zig @@ -68,11 +68,6 @@ pub const Type = struct { .enum_simple, .enum_numbered, => return .Enum, - - .@"union", - .union_safety_tagged, - .union_tagged, - => return .Union, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return .Int, @@ -140,6 +135,7 @@ pub const Type = struct { }, // values, not types + .un => unreachable, .extern_func => unreachable, .int => unreachable, .ptr => unreachable, @@ -585,12 +581,6 @@ pub const Type = struct { const b_enum_obj = (b.cast(Payload.EnumNumbered) orelse return 
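// Ordering rule visible in the SPIR-V lowering above, restated: with
// `layout = ty.unionGetLayout(mod)`, the tag is emitted first when
// `layout.tag_align >= layout.payload_align`; otherwise the payload (plus its
// padding) comes first and the tag follows. A zero-sized tag is omitted
// entirely, and `layout.padding` closes the value either way.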
false).data; return a_enum_obj == b_enum_obj; }, - - .@"union", .union_safety_tagged, .union_tagged => { - const a_union_obj = a.cast(Payload.Union).?.data; - const b_union_obj = (b.cast(Payload.Union) orelse return false).data; - return a_union_obj == b_union_obj; - }, } } @@ -752,12 +742,6 @@ pub const Type = struct { std.hash.autoHash(hasher, std.builtin.TypeId.Enum); std.hash.autoHash(hasher, enum_obj); }, - - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj: *const Module.Union = ty.cast(Payload.Union).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Union); - std.hash.autoHash(hasher, union_obj); - }, } } @@ -935,7 +919,6 @@ pub const Type = struct { .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), - .@"union", .union_safety_tagged, .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union), .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), .enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull), @@ -1011,12 +994,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), union_obj.owner_decl, - }); - }, .enum_full, .enum_nonexhaustive => { const enum_full = ty.cast(Payload.EnumFull).?.data; return writer.print("({s} decl={d})", .{ @@ -1221,11 +1198,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - const decl = mod.declPtr(union_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, .enum_full, .enum_nonexhaustive => { const enum_full = ty.cast(Payload.EnumFull).?.data; const decl = mod.declPtr(enum_full.owner_decl); @@ -1518,13 +1490,18 @@ pub const Type = struct { } }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + const decl = mod.declPtr(union_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + }, .opaque_type => |opaque_type| { const decl = mod.declPtr(opaque_type.decl); try decl.renderFullyQualifiedName(mod, writer); }, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -1627,45 +1604,6 @@ pub const Type = struct { return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); }, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - if (union_obj.status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. 
- union_obj.assumed_runtime_bits = true; - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(union_obj.haveFieldTypes()), - .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, - } - for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) { - return true; - } - - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(union_obj.haveFieldTypes()), - .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, - } - for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - .array => return ty.arrayLen(mod) != 0 and try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), @@ -1795,10 +1733,40 @@ pub const Type = struct { } }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_type.runtime_tag) { + .none => { + if (union_obj.status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. + union_obj.assumed_runtime_bits = true; + return true; + } + }, + .safety, .tagged => { + if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) { + return true; + } + }, + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(union_obj.haveFieldTypes()), + .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, + } + for (union_obj.fields.values()) |value| { + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + .opaque_type => true, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -1847,8 +1815,6 @@ pub const Type = struct { => ty.childType(mod).hasWellDefinedLayout(mod), .optional => ty.isPtrLikeOptional(mod), - .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, - .union_tagged => false, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => true, @@ -1912,10 +1878,14 @@ pub const Type = struct { }; return struct_obj.layout != .Auto; }, - .union_type => @panic("TODO"), + .union_type => |union_type| switch (union_type.runtime_tag) { + .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, + .tagged => false, + }, .opaque_type => false, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -2146,14 +2116,6 @@ pub const Type = struct { const int_tag_ty = try ty.intTagType(mod); return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; }, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, false); - }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return abiAlignmentAdvancedUnion(ty, mod, strat, 
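// Decision order in the `.union_type` arm above, summarized: an untagged
// union whose fields are still being resolved is optimistically assumed to
// have runtime bits (recorded in `assumed_runtime_bits` and checked again
// later); a tagged or safety-tagged union gains runtime bits from its tag
// alone; otherwise the fields are resolved according to `strat` (.sema
// resolves them, .eager asserts they are done, .lazy bails with
// error.NeedLazy) and scanned one by one.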
union_obj, true); - }, .inferred_alloc_const, .inferred_alloc_mut, @@ -2312,10 +2274,14 @@ pub const Type = struct { } return AbiAlignmentAdvanced{ .scalar = big_align }; }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); + }, .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -2508,14 +2474,6 @@ pub const Type = struct { const int_tag_ty = try ty.intTagType(mod); return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; }, - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - return abiSizeAdvancedUnion(ty, mod, strat, union_obj, false); - }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); - }, .array => { const payload = ty.castTag(.array).?.data; @@ -2737,10 +2695,14 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); + }, .opaque_type => unreachable, // no size available // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -2860,21 +2822,6 @@ pub const Type = struct { return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); }, - .@"union", .union_safety_tagged, .union_tagged => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout(mod) != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - const union_obj = ty.cast(Payload.Union).?.data; - assert(union_obj.haveFieldTypes()); - - var size: u64 = 0; - for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); - } - return size; - }, - .array => { const payload = ty.castTag(.array).?.data; const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); @@ -2996,10 +2943,24 @@ pub const Type = struct { return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + if (ty.containerLayout(mod) != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + const union_obj = mod.unionPtr(union_type.index); + assert(union_obj.haveFieldTypes()); + + var size: u64 = 0; + for (union_obj.fields.values()) |field| { + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); + } + return size; + }, .opaque_type => unreachable, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -3022,8 +2983,8 @@ pub const Type = struct { return true; }, .Union => { - if (ty.cast(Payload.Union)) |union_ty| { - return union_ty.data.haveLayout(); + if (mod.typeToUnion(ty)) |union_obj| { + return union_obj.haveLayout(); } return true; }, @@ -3413,76 +3374,71 @@ pub const Type = struct { /// Returns the tag type of a union, if the type is a union and it has a tag type. /// Otherwise, returns `null`. 
- pub fn unionTagType(ty: Type) ?Type { - return switch (ty.tag()) { - .union_tagged => { - const union_obj = ty.castTag(.union_tagged).?.data; - assert(union_obj.haveFieldTypes()); - return union_obj.tag_ty; + pub fn unionTagType(ty: Type, mod: *Module) ?Type { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .union_type => |union_type| switch (union_type.runtime_tag) { + .tagged => { + const union_obj = mod.unionPtr(union_type.index); + assert(union_obj.haveFieldTypes()); + return union_obj.tag_ty; + }, + else => null, }, - else => null, }; } /// Same as `unionTagType` but includes safety tag. /// Codegen should use this version. - pub fn unionTagTypeSafety(ty: Type) ?Type { - return switch (ty.tag()) { - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; + pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .union_type => |union_type| { + if (!union_type.hasTag()) return null; + const union_obj = mod.unionPtr(union_type.index); assert(union_obj.haveFieldTypes()); return union_obj.tag_ty; }, - else => null, }; } /// Asserts the type is a union; returns the tag type, even if the tag will /// not be stored at runtime. - pub fn unionTagTypeHypothetical(ty: Type) Type { - const union_obj = ty.cast(Payload.Union).?.data; + pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type { + const union_obj = mod.typeToUnion(ty).?; assert(union_obj.haveFieldTypes()); return union_obj.tag_ty; } - pub fn unionFields(ty: Type) Module.Union.Fields { - const union_obj = ty.cast(Payload.Union).?.data; + pub fn unionFields(ty: Type, mod: *Module) Module.Union.Fields { + const union_obj = mod.typeToUnion(ty).?; assert(union_obj.haveFieldTypes()); return union_obj.fields; } pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) Type { - const union_obj = ty.cast(Payload.Union).?.data; + const union_obj = mod.typeToUnion(ty).?; const index = ty.unionTagFieldIndex(enum_tag, mod).?; assert(union_obj.haveFieldTypes()); return union_obj.fields.values()[index].ty; } pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize { - const union_obj = ty.cast(Payload.Union).?.data; + const union_obj = mod.typeToUnion(ty).?; const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod) orelse return null; const name = union_obj.tag_ty.enumFieldName(index); return union_obj.fields.getIndex(name); } pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { - return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod); + const union_obj = mod.typeToUnion(ty).?; + return union_obj.hasAllZeroBitFieldTypes(mod); } pub fn unionGetLayout(ty: Type, mod: *Module) Module.Union.Layout { - switch (ty.tag()) { - .@"union" => { - const union_obj = ty.castTag(.@"union").?.data; - return union_obj.getLayout(mod, false); - }, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.getLayout(mod, true); - }, - else => unreachable, - } + const union_type = mod.intern_pool.indexToKey(ty.ip_index).union_type; + const union_obj = mod.unionPtr(union_type.index); + return union_obj.getLayout(mod, union_type.hasTag()); } pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { @@ -3490,9 +3446,6 @@ pub const Type = struct { .empty_struct_type => .Auto, .none => switch (ty.tag()) { .tuple, .anon_struct => .Auto, - .@"union" => ty.castTag(.@"union").?.data.layout, - .union_safety_tagged => 
ty.castTag(.union_safety_tagged).?.data.layout, - .union_tagged => ty.castTag(.union_tagged).?.data.layout, else => unreachable, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -3500,6 +3453,10 @@ pub const Type = struct { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; return struct_obj.layout; }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.layout; + }, else => unreachable, }, }; @@ -3777,6 +3734,7 @@ pub const Type = struct { .opaque_type => unreachable, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -4038,16 +3996,6 @@ pub const Type = struct { return null; } }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; - if (union_obj.fields.count() == 0) return Value.@"unreachable"; - const only_field = union_obj.fields.values()[0]; - const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; - _ = tag_val; - _ = val_val; - return Value.empty_struct; - }, .array => { if (ty.arrayLen(mod) == 0) @@ -4153,10 +4101,23 @@ pub const Type = struct { return empty.toValue(); }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; + if (union_obj.fields.count() == 0) return Value.@"unreachable"; + const only_field = union_obj.fields.values()[0]; + const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; + const only = try mod.intern(.{ .un = .{ + .ty = ty.ip_index, + .tag = tag_val.ip_index, + .val = val_val.ip_index, + } }); + return only.toValue(); + }, .opaque_type => return null, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -4216,20 +4177,6 @@ pub const Type = struct { return false; }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .wip, .unknown => { - // Return false to avoid incorrect dependency loops. - // This will be handled correctly once merged with - // `Sema.typeRequiresComptime`. - return false; - }, - .no => return false, - .yes => return true, - } - }, - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -4321,10 +4268,24 @@ pub const Type = struct { } }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_obj.requires_comptime) { + .wip, .unknown => { + // Return false to avoid incorrect dependency loops. + // This will be handled correctly once merged with + // `Sema.typeRequiresComptime`. 
+ return false; + }, + .no => return false, + .yes => return true, + } + }, + .opaque_type => false, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -4378,15 +4339,13 @@ pub const Type = struct { .none => switch (ty.tag()) { .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(), .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(), - .@"union" => ty.castTag(.@"union").?.data.namespace.toOptional(), - .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.namespace.toOptional(), - .union_tagged => ty.castTag(.union_tagged).?.data.namespace.toOptional(), - else => .none, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), .struct_type => |struct_type| struct_type.namespace, + .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), + else => .none, }, }; @@ -4474,20 +4433,23 @@ pub const Type = struct { /// Asserts the type is an enum or a union. pub fn intTagType(ty: Type, mod: *Module) !Type { - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty, - .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const field_count = enum_simple.fields.count(); - const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); - return mod.intType(.unsigned, bits); + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.tag_ty, + .enum_numbered => ty.castTag(.enum_numbered).?.data.tag_ty, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + const field_count = enum_simple.fields.count(); + const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); + return mod.intType(.unsigned, bits); + }, + else => unreachable, }, - .union_tagged => { - return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(mod); + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod), + else => unreachable, }, - else => unreachable, - } + }; } pub fn isNonexhaustiveEnum(ty: Type) bool { @@ -4663,10 +4625,6 @@ pub const Type = struct { pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].ty; - }, .tuple => return ty.castTag(.tuple).?.data.types[index], .anon_struct => return ty.castTag(.anon_struct).?.data.types[index], else => unreachable, @@ -4676,6 +4634,10 @@ pub const Type = struct { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; return struct_obj.fields.values()[index].ty; }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.fields.values()[index].ty; + }, else => unreachable, }, }; @@ -4684,10 +4646,6 @@ pub const Type = struct { pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 { switch (ty.ip_index) { .none => switch (ty.tag()) { - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].normalAlignment(mod); - }, .tuple => 
return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), else => unreachable, @@ -4698,6 +4656,10 @@ pub const Type = struct { assert(struct_obj.layout != .Packed); return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.fields.values()[index].normalAlignment(mod); + }, else => unreachable, }, } @@ -4889,18 +4851,6 @@ pub const Type = struct { return offset; }, - .@"union" => return 0, - .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - const layout = union_obj.getLayout(mod, true); - if (layout.tag_align >= layout.payload_align) { - // {Tag, Payload} - return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); - } else { - // {Payload, Tag} - return 0; - } - }, else => unreachable, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -4917,6 +4867,20 @@ pub const Type = struct { return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); }, + .union_type => |union_type| { + if (!union_type.hasTag()) + return 0; + const union_obj = mod.unionPtr(union_type.index); + const layout = union_obj.getLayout(mod, true); + if (layout.tag_align >= layout.payload_align) { + // {Tag, Payload} + return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + } else { + // {Payload, Tag} + return 0; + } + }, + else => unreachable, }, } @@ -4946,10 +4910,6 @@ pub const Type = struct { const error_set = ty.castTag(.error_set).?.data; return error_set.srcLoc(mod); }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.srcLoc(mod); - }, else => return null, }, @@ -4958,7 +4918,10 @@ pub const Type = struct { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; return struct_obj.srcLoc(mod); }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.srcLoc(mod); + }, .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), else => null, }, @@ -4985,10 +4948,6 @@ pub const Type = struct { const error_set = ty.castTag(.error_set).?.data; return error_set.owner_decl; }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.owner_decl; - }, else => return null, }, @@ -4997,7 +4956,10 @@ pub const Type = struct { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null; return struct_obj.owner_decl; }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.owner_decl; + }, .opaque_type => |opaque_type| opaque_type.decl, else => null, }, @@ -5039,9 +5001,6 @@ pub const Type = struct { /// The type is the inferred error set of a specific function. 
error_set_inferred, error_set_merged, - @"union", - union_safety_tagged, - union_tagged, enum_simple, enum_numbered, enum_full, @@ -5070,7 +5029,6 @@ pub const Type = struct { .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, - .@"union", .union_safety_tagged, .union_tagged => Payload.Union, .enum_full, .enum_nonexhaustive => Payload.EnumFull, .enum_simple => Payload.EnumSimple, .enum_numbered => Payload.EnumNumbered, @@ -5373,11 +5331,6 @@ pub const Type = struct { }; }; - pub const Union = struct { - base: Payload, - data: *Module.Union, - }; - pub const EnumFull = struct { base: Payload, data: *Module.EnumFull, diff --git a/src/value.zig b/src/value.zig index 3992888b3d77..dfeaa4442829 100644 --- a/src/value.zig +++ b/src/value.zig @@ -715,7 +715,7 @@ pub const Value = struct { } pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { - if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod); + if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(mod), mod); const field_index = switch (val.tag()) { .enum_field_index => val.castTag(.enum_field_index).?.data, @@ -1138,7 +1138,7 @@ pub const Value = struct { .Extern => unreachable, // Handled in non-packed writeToMemory .Packed => { const field_index = ty.unionTagFieldIndex(val.unionTag(), mod); - const field_type = ty.unionFields().values()[field_index.?].ty; + const field_type = ty.unionFields(mod).values()[field_index.?].ty; const field_val = try val.fieldValue(field_type, mod, field_index.?); return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); @@ -2021,7 +2021,7 @@ pub const Value = struct { const b_union = b.castTag(.@"union").?.data; switch (ty.containerLayout(mod)) { .Packed, .Extern => { - const tag_ty = ty.unionTagTypeHypothetical(); + const tag_ty = ty.unionTagTypeHypothetical(mod); if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) { // In this case, we must disregard mismatching tags and compare // based on the in-memory bytes of the payloads. @@ -2029,7 +2029,7 @@ pub const Value = struct { } }, .Auto => { - const tag_ty = ty.unionTagTypeHypothetical(); + const tag_ty = ty.unionTagTypeHypothetical(mod); if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) { return false; } @@ -2118,7 +2118,7 @@ pub const Value = struct { return false; } const field_name = tuple.names[0]; - const union_obj = ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(ty).?; const field_index = union_obj.fields.getIndex(field_name) orelse return false; const tag_and_val = b.castTag(.@"union").?.data; var field_tag_buf: Value.Payload.U32 = .{ @@ -2297,7 +2297,7 @@ pub const Value = struct { }, .Union => { const union_obj = val.cast(Payload.Union).?.data; - if (ty.unionTagType()) |tag_ty| { + if (ty.unionTagType(mod)) |tag_ty| { union_obj.tag.hash(tag_ty, hasher, mod); } const active_field_ty = ty.unionFieldType(union_obj.tag, mod); From 1c7095cb7dfcba3537edf3624a61046c9b772b1f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 May 2023 20:43:54 -0700 Subject: [PATCH 059/205] add std.hash.uint32 This is handy if you have a u32 and want a u32 and don't want to take a detour through many layers of abstraction elsewhere in the std.hash namespace. 
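For example, a minimal sketch of the intended use (`bucketOf`, `id`, and
`bucket_count` are hypothetical names, not part of this change):

    const std = @import("std");

    fn bucketOf(id: u32, bucket_count: u32) u32 {
        // Mix the bits of `id` so that nearby ids land in unrelated buckets.
        // `bucket_count` is assumed nonzero.
        return std.hash.uint32(id) % bucket_count;
    }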
Copied from https://nullprogram.com/blog/2018/07/31/ --- lib/std/hash.zig | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/std/hash.zig b/lib/std/hash.zig index 5c85b38d550b..eca7a7015952 100644 --- a/lib/std/hash.zig +++ b/lib/std/hash.zig @@ -36,6 +36,20 @@ const xxhash = @import("hash/xxhash.zig"); pub const XxHash64 = xxhash.XxHash64; pub const XxHash32 = xxhash.XxHash32; +/// This is handy if you have a u32 and want a u32 and don't want to take a +/// detour through many layers of abstraction elsewhere in the std.hash +/// namespace. +/// Copied from https://nullprogram.com/blog/2018/07/31/ +pub fn uint32(input: u32) u32 { + var x: u32 = input; + x ^= x >> 16; + x *%= 0x7feb352d; + x ^= x >> 15; + x *%= 0x846ca68b; + x ^= x >> 16; + return x; +} + test { _ = adler; _ = auto_hash; From 50bebb9e21c7e131522bec467b477ed7f55feb91 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 May 2023 20:47:33 -0700 Subject: [PATCH 060/205] InternPool: ability to encode enums This introduces a string table into InternPool as well as a curious new field called `maps` which is an array list of array hash maps with void/void key/value. Some types such as enums, structs, and unions need to store mappings from field names to field index, or value to field index. In such cases, they will store the underlying field names and values directly, relying on one of these maps, stored separately, to provide lookup. This allows the InternPool to be serialized via simple array copies, omitting all the maps, which are only used for optimizing lookup based on field name or field value. When the InternPool is deserialized it can be loaded via simple array copies, and then as a post-processing step the field name maps can be generated as extra metadata that is tacked on. This commit provides two encodings for enums - one when the integer tag type is explicitly provided and one when it is not. This is simpler than the previous setup, which has three encodings. Previous sizes: * EnumSimple: 40 bytes + 16 bytes per field * EnumNumbered: 80 bytes + 24 bytes per field * EnumFull: 184 bytes + 24 bytes per field Sizes after this commit: * type_enum_explicit: 24 bytes + 8 bytes per field * type_enum_auto: 16 bytes + 4 bytes per field --- src/InternPool.zig | 299 ++++++++++++++++++++++++++++++++++++++++++--- src/Sema.zig | 4 + src/type.zig | 11 ++ 3 files changed, 297 insertions(+), 17 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 4c4e3ab78a4c..92f1d1fad523 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -13,6 +13,12 @@ extra: std.ArrayListUnmanaged(u32) = .{}, /// Use the helper methods instead of accessing this directly in order to not /// violate the above mechanism. limbs: std.ArrayListUnmanaged(u64) = .{}, +/// In order to store references to strings in fewer bytes, we copy all +/// string bytes into here. String bytes can be null. It is up to whomever +/// is referencing the data here whether they want to store both index and length, +/// thus allowing null bytes, or store only index, and use null-termination. The +/// `string_bytes` array is agnostic to either usage. +string_bytes: std.ArrayListUnmanaged(u8) = .{}, /// Struct objects are stored in this data structure because: /// * They contain pointers such as the field maps. @@ -28,6 +34,12 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{}, /// When a Union object is freed from `allocated_unions`, it is pushed into this stack. 
unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, +/// Some types such as enums, structs, and unions need to store mappings from field names +/// to field index, or value to field index. In such cases, they will store the underlying +/// field names and values directly, relying on one of these maps, stored separately, +/// to provide lookup. +maps: std.ArrayListUnmanaged(std.AutoArrayHashMapUnmanaged(void, void)) = .{}, + const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -52,6 +64,46 @@ const KeyAdapter = struct { } }; +/// An index into `maps` which might be `none`. +pub const OptionalMapIndex = enum(u32) { + none = std.math.maxInt(u32), + _, +}; + +/// An index into `maps`. +pub const MapIndex = enum(u32) { + _, + + pub fn toOptional(i: MapIndex) OptionalMapIndex { + return @intToEnum(OptionalMapIndex, @enumToInt(i)); + } +}; + +/// An index into `string_bytes`. +pub const NullTerminatedString = enum(u32) { + _, + + const Adapter = struct { + strings: []const NullTerminatedString, + + pub fn eql(ctx: @This(), a: NullTerminatedString, b_void: void, b_map_index: usize) bool { + _ = b_void; + return a == ctx.strings[b_map_index]; + } + + pub fn hash(ctx: @This(), a: NullTerminatedString) u32 { + _ = ctx; + return std.hash.uint32(@enumToInt(a)); + } + }; +}; + +/// An index into `string_bytes` which might be `none`. +pub const OptionalNullTerminatedString = enum(u32) { + none = std.math.maxInt(u32), + _, +}; + pub const Key = union(enum) { int_type: IntType, ptr_type: PtrType, @@ -68,6 +120,7 @@ pub const Key = union(enum) { struct_type: StructType, union_type: UnionType, opaque_type: OpaqueType, + enum_type: EnumType, simple_value: SimpleValue, extern_func: struct { @@ -174,6 +227,30 @@ pub const Key = union(enum) { } }; + pub const EnumType = struct { + /// The Decl that corresponds to the enum itself. + decl: Module.Decl.Index, + /// Represents the declarations inside this enum. + namespace: Module.Namespace.OptionalIndex, + /// An integer type which is used for the numerical value of the enum. + /// This field is present regardless of whether the enum has an + /// explicitly provided tag type or auto-numbered. + tag_ty: Index, + /// Set of field names in declaration order. + names: []const NullTerminatedString, + /// Maps integer tag value to field index. + /// Entries are in declaration order, same as `fields`. + /// If this is empty, it means the enum tags are auto-numbered. + values: []const Index, + /// true if zig inferred this tag type, false if user specified it + tag_ty_inferred: bool, + /// This is ignored by `get` but will always be provided by `indexToKey`. + names_map: OptionalMapIndex = .none, + /// This is ignored by `get` but will be provided by `indexToKey` when + /// a value map exists. + values_map: OptionalMapIndex = .none, + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -263,6 +340,7 @@ pub const Key = union(enum) { => |info| std.hash.autoHash(hasher, info), .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), + .enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl), .int => |int| { // Canonicalize all integers by converting them to BigIntConst. 
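[Sketch, not part of the diff: how one of these void/void maps answers a
field-name lookup. The map stores no keys of its own; the adapter supplies
them from the enum's `names` slice. Here `names`, `map`, and `name` stand in
for data owned by a particular enum type.]

    const adapter: NullTerminatedString.Adapter = .{ .strings = names };
    const field_index: ?usize = map.getIndexAdapted(name, adapter);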
@@ -410,6 +488,10 @@ pub const Key = union(enum) { const b_info = b.opaque_type; return a_info.decl == b_info.decl; }, + .enum_type => |a_info| { + const b_info = b.enum_type; + return a_info.decl == b_info.decl; + }, .aggregate => |a_info| { const b_info = b.aggregate; if (a_info.ty != b_info.ty) return false; @@ -430,6 +512,7 @@ pub const Key = union(enum) { .struct_type, .union_type, .opaque_type, + .enum_type, => return .type_type, inline .ptr, @@ -592,6 +675,21 @@ pub const Index = enum(u32) { .legacy = undefined, }; } + + /// Used for a map of `Index` values to the index within a list of `Index` values. + const Adapter = struct { + indexes: []const Index, + + pub fn eql(ctx: @This(), a: Index, b_void: void, b_map_index: usize) bool { + _ = b_void; + return a == ctx.indexes[b_map_index]; + } + + pub fn hash(ctx: @This(), a: Index) u32 { + _ = ctx; + return std.hash.uint32(@enumToInt(a)); + } + }; }; pub const static_keys = [_]Key{ @@ -848,10 +946,12 @@ pub const Tag = enum(u8) { /// An error union type. /// data is payload to ErrorUnion. type_error_union, - /// Represents the data that an enum declaration provides, when the fields - /// are auto-numbered, and there are no declarations. - /// data is payload index to `EnumSimple`. - type_enum_simple, + /// An enum type with an explicitly provided integer tag type. + /// data is payload index to `EnumExplicit`. + type_enum_explicit, + /// An enum type with auto-numbered tag values. + /// data is payload index to `EnumAuto`. + type_enum_auto, /// A type that can be represented with only an enum tag. /// data is SimpleType enum value. simple_type, @@ -1087,17 +1187,35 @@ pub const ErrorUnion = struct { }; /// Trailing: -/// 0. field name: null-terminated string index for each fields_len; declaration order -pub const EnumSimple = struct { +/// 0. field name: NullTerminatedString for each fields_len; declaration order +/// 1. tag value: Index for each fields_len; declaration order +pub const EnumExplicit = struct { + /// The Decl that corresponds to the enum itself. + decl: Module.Decl.Index, + /// This may be `none` if there are no declarations. + namespace: Module.Namespace.OptionalIndex, + /// An integer type which is used for the numerical value of the enum, which + /// has been explicitly provided by the enum declaration. + int_tag_type: Index, + fields_len: u32, + /// Maps field names to declaration index. + names_map: MapIndex, + /// Maps field values to declaration index. + /// If this is `none`, it means the trailing tag values are absent because + /// they are auto-numbered. + values_map: OptionalMapIndex, +}; + +/// Trailing: +/// 0. field name: NullTerminatedString for each fields_len; declaration order +pub const EnumAuto = struct { /// The Decl that corresponds to the enum itself. decl: Module.Decl.Index, - /// An integer type which is used for the numerical value of the enum. This - /// is inferred by Zig to be the smallest power of two unsigned int that - /// fits the number of fields. It is stored here to avoid unnecessary - /// calculations and possibly allocation failure when querying the tag type - /// of enums. - int_tag_ty: Index, + /// This may be `none` if there are no declarations. + namespace: Module.Namespace.OptionalIndex, fields_len: u32, + /// Maps field names to declaration index. 
+ names_map: MapIndex, }; pub const PackedU64 = packed struct(u64) { @@ -1183,6 +1301,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); + ip.maps.deinit(gpa); + ip.string_bytes.deinit(gpa); + ip.* = undefined; } @@ -1256,7 +1377,6 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_optional => .{ .opt_type = @intToEnum(Index, data) }, .type_error_union => @panic("TODO"), - .type_enum_simple => @panic("TODO"), .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, .type_struct => { @@ -1288,6 +1408,46 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .runtime_tag = .safety, } }, + .type_enum_auto => { + const enum_auto = ip.extraDataTrail(EnumAuto, data); + const names = @ptrCast( + []const NullTerminatedString, + ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len], + ); + return .{ .enum_type = .{ + .decl = enum_auto.data.decl, + .namespace = enum_auto.data.namespace, + .tag_ty = ip.getEnumIntTagType(enum_auto.data.fields_len), + .names = names, + .values = &.{}, + .tag_ty_inferred = true, + .names_map = enum_auto.data.names_map.toOptional(), + .values_map = .none, + } }; + }, + .type_enum_explicit => { + const enum_explicit = ip.extraDataTrail(EnumExplicit, data); + const names = @ptrCast( + []const NullTerminatedString, + ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len], + ); + const values = if (enum_explicit.data.values_map != .none) @ptrCast( + []const Index, + ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len], + ) else &[0]Index{}; + + return .{ .enum_type = .{ + .decl = enum_explicit.data.decl, + .namespace = enum_explicit.data.namespace, + .tag_ty = enum_explicit.data.int_tag_type, + .names = names, + .values = values, + .tag_ty_inferred = false, + .names_map = enum_explicit.data.names_map.toOptional(), + .values_map = enum_explicit.data.values_map, + } }; + }, + .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, @@ -1362,6 +1522,14 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }; } +/// Asserts the integer tag type is already present in the InternPool. 
+fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index { + return ip.getAssumeExists(.{ .int_type = .{ + .bits = if (fields_len == 0) 0 else std.math.log2_int_ceil(u32, fields_len), + .signedness = .unsigned, + } }); +} + fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key { const int_info = ip.limbData(Int, limb_index); return .{ .int = .{ @@ -1522,6 +1690,54 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, + .enum_type => |enum_type| { + assert(enum_type.tag_ty != .none); + assert(enum_type.names_map == .none); + assert(enum_type.values_map == .none); + + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, enum_type.names); + + const fields_len = @intCast(u32, enum_type.names.len); + + if (enum_type.tag_ty_inferred) { + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + + fields_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_enum_auto, + .data = ip.addExtraAssumeCapacity(EnumAuto{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .names_map = names_map, + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); + return @intToEnum(Index, ip.items.len - 1); + } + + const values_map: OptionalMapIndex = if (enum_type.values.len == 0) .none else m: { + const values_map = try ip.addMap(gpa); + try addIndexesToMap(ip, gpa, values_map, enum_type.values); + break :m values_map.toOptional(); + }; + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + + fields_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_enum_auto, + .data = ip.addExtraAssumeCapacity(EnumExplicit{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .fields_len = fields_len, + .names_map = names_map, + .values_map = values_map, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values)); + }, + .extern_func => @panic("TODO"), .ptr => |ptr| switch (ptr.addr) { @@ -1723,6 +1939,40 @@ pub fn getAssumeExists(ip: InternPool, key: Key) Index { return @intToEnum(Index, index); } +fn addStringsToMap( + ip: *InternPool, + gpa: Allocator, + map_index: MapIndex, + strings: []const NullTerminatedString, +) Allocator.Error!void { + const map = &ip.maps.items[@enumToInt(map_index)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = strings }; + for (strings) |string| { + const gop = try map.getOrPutAdapted(gpa, string, adapter); + assert(!gop.found_existing); + } +} + +fn addIndexesToMap( + ip: *InternPool, + gpa: Allocator, + map_index: MapIndex, + indexes: []const Index, +) Allocator.Error!void { + const map = &ip.maps.items[@enumToInt(map_index)]; + const adapter: Index.Adapter = .{ .indexes = indexes }; + for (indexes) |index| { + const gop = try map.getOrPutAdapted(gpa, index, adapter); + assert(!gop.found_existing); + } +} + +fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { + const ptr = try ip.maps.addOne(gpa); + ptr.* = .{}; + return @intToEnum(MapIndex, ip.maps.items.len - 1); +} + /// This operation only happens under compile error conditions. /// Leak the index until the next garbage collection. 
pub fn remove(ip: *InternPool, index: Index) void { @@ -1758,6 +2008,9 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { Index => @enumToInt(@field(extra, field.name)), Module.Decl.Index => @enumToInt(@field(extra, field.name)), Module.Namespace.Index => @enumToInt(@field(extra, field.name)), + Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)), + MapIndex => @enumToInt(@field(extra, field.name)), + OptionalMapIndex => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), @@ -1806,15 +2059,19 @@ fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { } } -fn extraData(ip: InternPool, comptime T: type, index: usize) T { +fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { var result: T = undefined; - inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { + const fields = @typeInfo(T).Struct.fields; + inline for (fields, 0..) |field, i| { const int32 = ip.extra.items[i + index]; @field(result, field.name) = switch (field.type) { u32 => int32, Index => @intToEnum(Index, int32), Module.Decl.Index => @intToEnum(Module.Decl.Index, int32), Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32), + Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32), + MapIndex => @intToEnum(MapIndex, int32), + OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), @@ -1822,7 +2079,14 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T { else => @compileError("bad field type: " ++ @typeName(field.type)), }; } - return result; + return .{ + .data = result, + .end = index + fields.len, + }; +} + +fn extraData(ip: InternPool, comptime T: type, index: usize) T { + return extraDataTrail(ip, T, index).data; } /// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. 
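[Sketch, not part of the diff: `extraDataTrail` reports where the fixed-size
record ends so that callers can slice the arrays stored after it, as in the
`type_enum_auto` decoding above.]

    const enum_auto = ip.extraDataTrail(EnumAuto, data);
    const names = @ptrCast(
        []const NullTerminatedString,
        ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len],
    );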
@@ -2071,7 +2335,8 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_slice => 0, .type_optional => 0, .type_error_union => @sizeOf(ErrorUnion), - .type_enum_simple => @sizeOf(EnumSimple), + .type_enum_explicit => @sizeOf(EnumExplicit), + .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), .type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), .type_struct_ns => @sizeOf(Module.Namespace), diff --git a/src/Sema.zig b/src/Sema.zig index 76ac887c06bb..e9bf66565e43 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -31760,6 +31760,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .opaque_type => false, + .enum_type => @panic("TODO"), + // values, not types .un => unreachable, .simple_value => unreachable, @@ -33293,6 +33295,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return only.toValue(); }, .opaque_type => null, + .enum_type => @panic("TODO"), // values, not types .un => unreachable, @@ -33862,6 +33865,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .opaque_type => false, + .enum_type => @panic("TODO"), // values, not types .un => unreachable, diff --git a/src/type.zig b/src/type.zig index 0096a96aa22a..ab02b29d49be 100644 --- a/src/type.zig +++ b/src/type.zig @@ -79,6 +79,7 @@ pub const Type = struct { .struct_type => return .Struct, .union_type => return .Union, .opaque_type => return .Opaque, + .enum_type => return .Enum, .simple_type => |s| switch (s) { .f16, .f32, @@ -1499,6 +1500,7 @@ pub const Type = struct { const decl = mod.declPtr(opaque_type.decl); try decl.renderFullyQualifiedName(mod, writer); }, + .enum_type => @panic("TODO"), // values, not types .un => unreachable, @@ -1764,6 +1766,7 @@ pub const Type = struct { }, .opaque_type => true, + .enum_type => @panic("TODO"), // values, not types .un => unreachable, @@ -1883,6 +1886,7 @@ pub const Type = struct { .tagged => false, }, .opaque_type => false, + .enum_type => @panic("TODO"), // values, not types .un => unreachable, @@ -2279,6 +2283,7 @@ pub const Type = struct { return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); }, .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, + .enum_type => @panic("TODO"), // values, not types .un => unreachable, @@ -2700,6 +2705,7 @@ pub const Type = struct { return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); }, .opaque_type => unreachable, // no size available + .enum_type => @panic("TODO"), // values, not types .un => unreachable, @@ -2958,6 +2964,7 @@ pub const Type = struct { return size; }, .opaque_type => unreachable, + .enum_type => @panic("TODO"), // values, not types .un => unreachable, @@ -3721,6 +3728,7 @@ pub const Type = struct { assert(struct_obj.layout == .Packed); ty = struct_obj.backing_int_ty; }, + .enum_type => @panic("TODO"), .ptr_type => unreachable, .array_type => unreachable, @@ -4115,6 +4123,7 @@ pub const Type = struct { return only.toValue(); }, .opaque_type => return null, + .enum_type => @panic("TODO"), // values, not types .un => unreachable, @@ -4284,6 +4293,8 @@ pub const Type = struct { .opaque_type => false, + .enum_type => @panic("TODO"), + // values, not types .un => unreachable, .simple_value => unreachable, From 404cbc36c52a50975a69e78da716f2258e5b1696 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 May 2023 21:06:50 -0700 Subject: [PATCH 061/205] InternPool: fix deinit leaking inner maps --- 
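A reduced sketch of the ownership pattern behind this fix (`gpa` is an
assumed allocator variable; note the follow-up commit below also corrects
the loop to iterate `ip.maps.items`):

    var maps: std.ArrayListUnmanaged(std.AutoArrayHashMapUnmanaged(void, void)) = .{};
    // ... inner maps allocated via addMap() ...
    for (maps.items) |*map| map.deinit(gpa); // each inner map owns its own storage
    maps.deinit(gpa); // only then free the outer list's buffer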
src/InternPool.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 92f1d1fad523..4c7b7016eae7 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1294,6 +1294,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); + ip.string_bytes.deinit(gpa); ip.structs_free_list.deinit(gpa); ip.allocated_structs.deinit(gpa); @@ -1301,8 +1302,8 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); + for (ip.maps) |*map| map.deinit(gpa); ip.maps.deinit(gpa); - ip.string_bytes.deinit(gpa); ip.* = undefined; } From 5881a2d63771b070107bdc2325aa1bc455b2d926 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 12 May 2023 00:07:32 -0700 Subject: [PATCH 062/205] stage2: move enum types into the InternPool Unlike unions and structs, enums are actually *encoded* into the InternPool directly, rather than using the SegmentedList trick. This results in them being quite compact, and greatly improved the ergonomics of using enum types throughout the compiler. It did however require introducing a new concept to the InternPool which is an "incomplete" item - something that is added to gain a permanent Index, but which is then mutated in place. This was necessary because enum tag values and tag types may reference the namespaces created by the enum itself, which required constructing the namespace, decl, and calling analyzeDecl on the decl, which required the decl value, which required the enum type, which required an InternPool index to be assigned and for it to be meaningful. The API for updating enums in place turned out to be quite slick and efficient - the methods directly populate pre-allocated arrays and return the information necessary to output the same compilation errors as before. --- src/AstGen.zig | 4 +- src/InternPool.zig | 465 ++++++++++++++++++---- src/Module.zig | 195 ++------- src/Sema.zig | 774 +++++++++++++++--------------------- src/TypedValue.zig | 2 +- src/arch/wasm/CodeGen.zig | 50 +-- src/arch/x86_64/CodeGen.zig | 9 +- src/codegen.zig | 32 +- src/codegen/c.zig | 32 +- src/codegen/llvm.zig | 64 ++- src/link/Dwarf.zig | 21 +- src/type.zig | 402 +++++-------------- src/value.zig | 62 +-- 13 files changed, 934 insertions(+), 1178 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index edd609912787..998e08ba04e7 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -10694,8 +10694,8 @@ fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 { const string_bytes = &astgen.string_bytes; const str_index = @intCast(u32, string_bytes.items.len); try astgen.appendIdentStr(ident_token, string_bytes); - const key = string_bytes.items[str_index..]; - const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{ + const key: []const u8 = string_bytes.items[str_index..]; + const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ .bytes = string_bytes, }, StringIndexContext{ .bytes = string_bytes, diff --git a/src/InternPool.zig b/src/InternPool.zig index 4c7b7016eae7..6ff68a758368 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -40,6 +40,14 @@ unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, /// to provide lookup. maps: std.ArrayListUnmanaged(std.AutoArrayHashMapUnmanaged(void, void)) = .{}, +/// Used for finding the index inside `string_bytes`. 
+string_table: std.HashMapUnmanaged( + u32, + void, + std.hash_map.StringIndexContext, + std.hash_map.default_max_load_percentage, +) = .{}, + const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -68,6 +76,11 @@ const KeyAdapter = struct { pub const OptionalMapIndex = enum(u32) { none = std.math.maxInt(u32), _, + + pub fn unwrap(oi: OptionalMapIndex) ?MapIndex { + if (oi == .none) return null; + return @intToEnum(MapIndex, @enumToInt(oi)); + } }; /// An index into `maps`. @@ -83,6 +96,10 @@ pub const MapIndex = enum(u32) { pub const NullTerminatedString = enum(u32) { _, + pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { + return @intToEnum(OptionalNullTerminatedString, @enumToInt(self)); + } + const Adapter = struct { strings: []const NullTerminatedString, @@ -102,6 +119,11 @@ pub const NullTerminatedString = enum(u32) { pub const OptionalNullTerminatedString = enum(u32) { none = std.math.maxInt(u32), _, + + pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString { + if (oi == .none) return null; + return @intToEnum(NullTerminatedString, @enumToInt(oi)); + } }; pub const Key = union(enum) { @@ -242,13 +264,75 @@ pub const Key = union(enum) { /// Entries are in declaration order, same as `fields`. /// If this is empty, it means the enum tags are auto-numbered. values: []const Index, - /// true if zig inferred this tag type, false if user specified it - tag_ty_inferred: bool, + tag_mode: TagMode, /// This is ignored by `get` but will always be provided by `indexToKey`. names_map: OptionalMapIndex = .none, /// This is ignored by `get` but will be provided by `indexToKey` when /// a value map exists. values_map: OptionalMapIndex = .none, + + pub const TagMode = enum { + /// The integer tag type was auto-numbered by zig. + auto, + /// The integer tag type was provided by the enum declaration, and the enum + /// is exhaustive. + explicit, + /// The integer tag type was provided by the enum declaration, and the enum + /// is non-exhaustive. + nonexhaustive, + }; + + /// Look up field index based on field name. + pub fn nameIndex(self: EnumType, ip: InternPool, name: NullTerminatedString) ?usize { + const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + return map.getIndexAdapted(name, adapter); + } + + /// Look up field index based on tag value. + /// Asserts that `values_map` is not `none`. + /// This function returns `null` when `tag_val` does not have the + /// integer tag type of the enum. + pub fn tagValueIndex(self: EnumType, ip: InternPool, tag_val: Index) ?usize { + assert(tag_val != .none); + const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)]; + const adapter: Index.Adapter = .{ .indexes = self.values }; + return map.getIndexAdapted(tag_val, adapter); + } + }; + + pub const IncompleteEnumType = struct { + /// Same as corresponding `EnumType` field. + decl: Module.Decl.Index, + /// Same as corresponding `EnumType` field. + namespace: Module.Namespace.OptionalIndex, + /// The field names and field values are not known yet, but + /// the number of fields must be known ahead of time. + fields_len: u32, + /// This information is needed so that the size does not change + /// later when populating field values. + has_values: bool, + /// Same as corresponding `EnumType` field. + tag_mode: EnumType.TagMode, + /// This may be updated via `setTagType` later. 
+ tag_ty: Index = .none, + + pub fn toEnumType(self: @This()) EnumType { + return .{ + .decl = self.decl, + .namespace = self.namespace, + .tag_ty = self.tag_ty, + .tag_mode = self.tag_mode, + .names = &.{}, + .values = &.{}, + }; + } + + /// Only the decl is used for hashing and equality, so we can construct + /// this minimal key for use with `map`. + pub fn toKey(self: @This()) Key { + return .{ .enum_type = self.toEnumType() }; + } }; pub const Int = struct { @@ -946,12 +1030,18 @@ pub const Tag = enum(u8) { /// An error union type. /// data is payload to ErrorUnion. type_error_union, - /// An enum type with an explicitly provided integer tag type. - /// data is payload index to `EnumExplicit`. - type_enum_explicit, /// An enum type with auto-numbered tag values. + /// The enum is exhaustive. /// data is payload index to `EnumAuto`. type_enum_auto, + /// An enum type with an explicitly provided integer tag type. + /// The enum is exhaustive. + /// data is payload index to `EnumExplicit`. + type_enum_explicit, + /// An enum type with an explicitly provided integer tag type. + /// The enum is non-exhaustive. + /// data is payload index to `EnumExplicit`. + type_enum_nonexhaustive, /// A type that can be represented with only an enum tag. /// data is SimpleType enum value. simple_type, @@ -1302,9 +1392,11 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); - for (ip.maps) |*map| map.deinit(gpa); + for (ip.maps.items) |*map| map.deinit(gpa); ip.maps.deinit(gpa); + ip.string_table.deinit(gpa); + ip.* = undefined; } @@ -1421,33 +1513,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .tag_ty = ip.getEnumIntTagType(enum_auto.data.fields_len), .names = names, .values = &.{}, - .tag_ty_inferred = true, + .tag_mode = .auto, .names_map = enum_auto.data.names_map.toOptional(), .values_map = .none, } }; }, - .type_enum_explicit => { - const enum_explicit = ip.extraDataTrail(EnumExplicit, data); - const names = @ptrCast( - []const NullTerminatedString, - ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len], - ); - const values = if (enum_explicit.data.values_map != .none) @ptrCast( - []const Index, - ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len], - ) else &[0]Index{}; - - return .{ .enum_type = .{ - .decl = enum_explicit.data.decl, - .namespace = enum_explicit.data.namespace, - .tag_ty = enum_explicit.data.int_tag_type, - .names = names, - .values = values, - .tag_ty_inferred = false, - .names_map = enum_explicit.data.names_map.toOptional(), - .values_map = enum_explicit.data.values_map, - } }; - }, + .type_enum_explicit => indexToKeyEnum(ip, data, .explicit), + .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive), .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), @@ -1531,6 +1603,29 @@ fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index { } }); } +fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { + const enum_explicit = ip.extraDataTrail(EnumExplicit, data); + const names = @ptrCast( + []const NullTerminatedString, + ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len], + ); + const values = if (enum_explicit.data.values_map != .none) @ptrCast( + []const Index, + ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len], + ) else &[0]Index{}; + + return .{ .enum_type = .{ + .decl = enum_explicit.data.decl, + .namespace = enum_explicit.data.namespace, + .tag_ty = 
enum_explicit.data.int_tag_type, + .names = names, + .values = values, + .tag_mode = tag_mode, + .names_map = enum_explicit.data.names_map.toOptional(), + .values_map = enum_explicit.data.values_map, + } }; +} + fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key { const int_info = ip.limbData(Int, limb_index); return .{ .int = .{ @@ -1696,47 +1791,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(enum_type.names_map == .none); assert(enum_type.values_map == .none); - const names_map = try ip.addMap(gpa); - try addStringsToMap(ip, gpa, names_map, enum_type.names); + switch (enum_type.tag_mode) { + .auto => { + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, enum_type.names); - const fields_len = @intCast(u32, enum_type.names.len); - - if (enum_type.tag_ty_inferred) { - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + - fields_len); - ip.items.appendAssumeCapacity(.{ - .tag = .type_enum_auto, - .data = ip.addExtraAssumeCapacity(EnumAuto{ - .decl = enum_type.decl, - .namespace = enum_type.namespace, - .names_map = names_map, - .fields_len = fields_len, - }), - }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); - return @intToEnum(Index, ip.items.len - 1); + const fields_len = @intCast(u32, enum_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + + fields_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_enum_auto, + .data = ip.addExtraAssumeCapacity(EnumAuto{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .names_map = names_map, + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); + return @intToEnum(Index, ip.items.len - 1); + }, + .explicit => return finishGetEnum(ip, gpa, enum_type, .type_enum_explicit), + .nonexhaustive => return finishGetEnum(ip, gpa, enum_type, .type_enum_nonexhaustive), } - - const values_map: OptionalMapIndex = if (enum_type.values.len == 0) .none else m: { - const values_map = try ip.addMap(gpa); - try addIndexesToMap(ip, gpa, values_map, enum_type.values); - break :m values_map.toOptional(); - }; - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + - fields_len); - ip.items.appendAssumeCapacity(.{ - .tag = .type_enum_auto, - .data = ip.addExtraAssumeCapacity(EnumExplicit{ - .decl = enum_type.decl, - .namespace = enum_type.namespace, - .int_tag_type = enum_type.tag_ty, - .fields_len = fields_len, - .names_map = names_map, - .values_map = values_map, - }), - }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values)); }, .extern_func => @panic("TODO"), @@ -1934,8 +2011,206 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } -pub fn getAssumeExists(ip: InternPool, key: Key) Index { - const adapter: KeyAdapter = .{ .intern_pool = &ip }; +/// Provides API for completing an enum type after calling `getIncompleteEnum`. 
+pub const IncompleteEnumType = struct { + index: Index, + tag_ty_index: u32, + names_map: MapIndex, + names_start: u32, + values_map: OptionalMapIndex, + values_start: u32, + + pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void { + assert(tag_ty != .none); + ip.extra.items[self.tag_ty_index] = @enumToInt(tag_ty); + } + + /// Returns the already-existing field with the same name, if any. + pub fn addFieldName( + self: @This(), + ip: *InternPool, + gpa: Allocator, + name: NullTerminatedString, + ) Allocator.Error!?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map)]; + const field_index = map.count(); + const strings = ip.extra.items[self.names_start..][0..field_index]; + const adapter: NullTerminatedString.Adapter = .{ + .strings = @ptrCast([]const NullTerminatedString, strings), + }; + const gop = try map.getOrPutAdapted(gpa, name, adapter); + if (gop.found_existing) return @intCast(u32, gop.index); + ip.extra.items[self.names_start + field_index] = @enumToInt(name); + return null; + } + + /// Returns the already-existing field with the same value, if any. + /// Make sure the type of the value has the integer tag type of the enum. + pub fn addFieldValue( + self: @This(), + ip: *InternPool, + gpa: Allocator, + value: Index, + ) Allocator.Error!?u32 { + const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)]; + const field_index = map.count(); + const indexes = ip.extra.items[self.values_start..][0..field_index]; + const adapter: Index.Adapter = .{ + .indexes = @ptrCast([]const Index, indexes), + }; + const gop = try map.getOrPutAdapted(gpa, value, adapter); + if (gop.found_existing) return @intCast(u32, gop.index); + ip.extra.items[self.values_start + field_index] = @enumToInt(value); + return null; + } +}; + +/// This is used to create an enum type in the `InternPool`, with the ability +/// to update the tag type, field names, and field values later. +pub fn getIncompleteEnum( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, +) Allocator.Error!InternPool.IncompleteEnumType { + switch (enum_type.tag_mode) { + .auto => return getIncompleteEnumAuto(ip, gpa, enum_type), + .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit), + .nonexhaustive => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_nonexhaustive), + } +} + +pub fn getIncompleteEnumAuto( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, +) Allocator.Error!InternPool.IncompleteEnumType { + // Although the integer tag type will not be stored in the `EnumAuto` struct, + // `InternPool` logic depends on it being present so that `typeOf` can be infallible. + // Ensure it is present here: + _ = try ip.get(gpa, .{ .int_type = .{ + .bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len), + .signedness = .unsigned, + } }); + + // We must keep the map in sync with `items`. The hash and equality functions + // for enum types only look at the decl field, which is present even in + // an `IncompleteEnumType`. 
+ const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter); + assert(!gop.found_existing); + + const names_map = try ip.addMap(gpa); + + const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len; + try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len); + + const extra_index = ip.addExtraAssumeCapacity(EnumAuto{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .names_map = names_map, + .fields_len = enum_type.fields_len, + }); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_enum_auto, + .data = extra_index, + }); + ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), enum_type.fields_len); + return .{ + .index = @intToEnum(Index, ip.items.len - 1), + .tag_ty_index = undefined, + .names_map = names_map, + .names_start = extra_index + extra_fields_len, + .values_map = .none, + .values_start = undefined, + }; +} + +pub fn getIncompleteEnumExplicit( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, + tag: Tag, +) Allocator.Error!InternPool.IncompleteEnumType { + // We must keep the map in sync with `items`. The hash and equality functions + // for enum types only look at the decl field, which is present even in + // an `IncompleteEnumType`. + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter); + assert(!gop.found_existing); + + const names_map = try ip.addMap(gpa); + const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: { + const values_map = try ip.addMap(gpa); + break :m values_map.toOptional(); + }; + + const reserved_len = enum_type.fields_len + + if (enum_type.has_values) enum_type.fields_len else 0; + + const extra_fields_len: u32 = @typeInfo(EnumExplicit).Struct.fields.len; + try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + reserved_len); + + const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .fields_len = enum_type.fields_len, + .names_map = names_map, + .values_map = values_map, + }); + + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = extra_index, + }); + // This is both fields and values (if present). 
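+ // Name slots are laid out first, immediately after the `EnumExplicit`
+ // fields, and the value slots (if any) follow them, which is why
+ // `values_start` below is `names_start` plus `fields_len`.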
+ ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), reserved_len); + return .{ + .index = @intToEnum(Index, ip.items.len - 1), + .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?, + .names_map = names_map, + .names_start = extra_index + extra_fields_len, + .values_map = values_map, + .values_start = extra_index + extra_fields_len + enum_type.fields_len, + }; +} + +pub fn finishGetEnum( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.EnumType, + tag: Tag, +) Allocator.Error!Index { + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, enum_type.names); + + const values_map: OptionalMapIndex = if (enum_type.values.len == 0) .none else m: { + const values_map = try ip.addMap(gpa); + try addIndexesToMap(ip, gpa, values_map, enum_type.values); + break :m values_map.toOptional(); + }; + const fields_len = @intCast(u32, enum_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + + fields_len); + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = ip.addExtraAssumeCapacity(EnumExplicit{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .fields_len = fields_len, + .names_map = names_map, + .values_map = values_map, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values)); + return @intToEnum(Index, ip.items.len - 1); +} + +pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { + const adapter: KeyAdapter = .{ .intern_pool = ip }; const index = ip.map.getIndexAdapted(key, adapter).?; return @intToEnum(Index, index); } @@ -1979,6 +2254,7 @@ fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { pub fn remove(ip: *InternPool, index: Index) void { _ = ip; _ = index; + @setCold(true); @panic("TODO this is a bit problematic to implement, could we maybe just never support a remove() operation on InternPool?"); } @@ -2336,7 +2612,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_slice => 0, .type_optional => 0, .type_error_union => @sizeOf(ErrorUnion), - .type_enum_explicit => @sizeOf(EnumExplicit), + .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), .type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), @@ -2448,3 +2724,50 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) // allocation failures here, instead leaking the Union until garbage collection. 
}; } + +pub fn getOrPutString( + ip: *InternPool, + gpa: Allocator, + s: []const u8, +) Allocator.Error!NullTerminatedString { + const string_bytes = &ip.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len); + try string_bytes.ensureUnusedCapacity(gpa, s.len + 1); + string_bytes.appendSliceAssumeCapacity(s); + const key: []const u8 = string_bytes.items[str_index..]; + const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{ + .bytes = string_bytes, + }, std.hash_map.StringIndexContext{ + .bytes = string_bytes, + }); + if (gop.found_existing) { + string_bytes.shrinkRetainingCapacity(str_index); + return @intToEnum(NullTerminatedString, gop.key_ptr.*); + } else { + gop.key_ptr.* = str_index; + string_bytes.appendAssumeCapacity(0); + return @intToEnum(NullTerminatedString, str_index); + } +} + +pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { + if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{ + .bytes = &ip.string_bytes, + })) |index| { + return @intToEnum(NullTerminatedString, index).toOptional(); + } else { + return .none; + } +} + +pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { + const string_bytes = ip.string_bytes.items; + const start = @enumToInt(s); + var end: usize = start; + while (string_bytes[end] != 0) end += 1; + return string_bytes[start..end :0]; +} + +pub fn typeOf(ip: InternPool, index: Index) Index { + return ip.indexToKey(index).typeOf(); +} diff --git a/src/Module.zig b/src/Module.zig index 6478f7ce4f56..6bcd148e67df 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -886,29 +886,17 @@ pub const Decl = struct { /// Only returns it if the Decl is the owner. pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex { if (!decl.owns_tv) return .none; - switch (decl.val.ip_index) { - .empty_struct_type => return .none, - .none => { - const ty = (decl.val.castTag(.ty) orelse return .none).data; - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; - return enum_obj.namespace.toOptional(); - }, - - else => return .none, - } - }, - else => return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + return switch (decl.val.ip_index) { + .empty_struct_type => .none, + .none => .none, + else => switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), .struct_type => |struct_type| struct_type.namespace, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.namespace.toOptional(); - }, + .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), + .enum_type => |enum_type| enum_type.namespace, else => .none, }, - } + }; } /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. @@ -1135,28 +1123,6 @@ pub const Struct = struct { return mod.declPtr(s.owner_decl).srcLoc(mod); } - pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc { - @setCold(true); - const owner_decl = mod.declPtr(s.owner_decl); - const file = owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { - // In this case we emit a warning + a less precise source location. 
- log.warn("unable to load {s}: {s}", .{ - file.sub_file_path, @errorName(err), - }); - return s.srcLoc(mod); - }; - const node = owner_decl.relativeToNodeIndex(0); - - var buf: [2]Ast.Node.Index = undefined; - if (tree.fullContainerDecl(&buf, node)) |container_decl| { - return queryFieldSrc(tree.*, query, file, container_decl); - } else { - // This struct was generated using @Type - return s.srcLoc(mod); - } - } - pub fn haveFieldTypes(s: Struct) bool { return switch (s.status) { .none, @@ -1237,110 +1203,6 @@ pub const Struct = struct { } }; -/// Represents the data that an enum declaration provides, when the fields -/// are auto-numbered, and there are no declarations. The integer tag type -/// is inferred to be the smallest power of two unsigned int that fits -/// the number of fields. -pub const EnumSimple = struct { - /// The Decl that corresponds to the enum itself. - owner_decl: Decl.Index, - /// Set of field names in declaration order. - fields: NameMap, - - pub const NameMap = EnumFull.NameMap; - - pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } -}; - -/// Represents the data that an enum declaration provides, when there are no -/// declarations. However an integer tag type is provided, and the enum tag values -/// are explicitly provided. -pub const EnumNumbered = struct { - /// The Decl that corresponds to the enum itself. - owner_decl: Decl.Index, - /// An integer type which is used for the numerical value of the enum. - /// Whether zig chooses this type or the user specifies it, it is stored here. - tag_ty: Type, - /// Set of field names in declaration order. - fields: NameMap, - /// Maps integer tag value to field index. - /// Entries are in declaration order, same as `fields`. - /// If this hash map is empty, it means the enum tags are auto-numbered. - values: ValueMap, - - pub const NameMap = EnumFull.NameMap; - pub const ValueMap = EnumFull.ValueMap; - - pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } -}; - -/// Represents the data that an enum declaration provides, when there is -/// at least one tag value explicitly specified, or at least one declaration. -pub const EnumFull = struct { - /// The Decl that corresponds to the enum itself. - owner_decl: Decl.Index, - /// An integer type which is used for the numerical value of the enum. - /// Whether zig chooses this type or the user specifies it, it is stored here. - tag_ty: Type, - /// Set of field names in declaration order. - fields: NameMap, - /// Maps integer tag value to field index. - /// Entries are in declaration order, same as `fields`. - /// If this hash map is empty, it means the enum tags are auto-numbered. - values: ValueMap, - /// Represents the declarations inside this enum. 
- namespace: Namespace.Index, - /// true if zig inferred this tag type, false if user specified it - tag_ty_inferred: bool, - - pub const NameMap = std.StringArrayHashMapUnmanaged(void); - pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false); - - pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - pub fn fieldSrcLoc(e: EnumFull, mod: *Module, query: FieldSrcQuery) SrcLoc { - @setCold(true); - const owner_decl = mod.declPtr(e.owner_decl); - const file = owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { - // In this case we emit a warning + a less precise source location. - log.warn("unable to load {s}: {s}", .{ - file.sub_file_path, @errorName(err), - }); - return e.srcLoc(mod); - }; - const node = owner_decl.relativeToNodeIndex(0); - var buf: [2]Ast.Node.Index = undefined; - if (tree.fullContainerDecl(&buf, node)) |container_decl| { - return queryFieldSrc(tree.*, query, file, container_decl); - } else { - // This enum was generated using @Type - return e.srcLoc(mod); - } - } -}; - pub const Union = struct { /// An enum type which is used for the tag of the union. /// This type is created even for untagged unions, even when the memory @@ -1427,28 +1289,6 @@ pub const Union = struct { }; } - pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc { - @setCold(true); - const owner_decl = mod.declPtr(u.owner_decl); - const file = owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { - // In this case we emit a warning + a less precise source location. - log.warn("unable to load {s}: {s}", .{ - file.sub_file_path, @errorName(err), - }); - return u.srcLoc(mod); - }; - const node = owner_decl.relativeToNodeIndex(0); - - var buf: [2]Ast.Node.Index = undefined; - if (tree.fullContainerDecl(&buf, node)) |container_decl| { - return queryFieldSrc(tree.*, query, file, container_decl); - } else { - // This union was generated using @Type - return u.srcLoc(mod); - } - } - pub fn haveFieldTypes(u: Union) bool { return switch (u.status) { .none, @@ -7313,3 +7153,24 @@ pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null; return mod.unionPtr(union_index); } + +pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { + @setCold(true); + const owner_decl = mod.declPtr(owner_decl_index); + const file = owner_decl.getFileScope(mod); + const tree = file.getTree(mod.gpa) catch |err| { + // In this case we emit a warning + a less precise source location. 
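+ // Falling back to the owner decl's own source location below keeps the
+ // diagnostic usable even though the exact field can no longer be pointed at.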
+ log.warn("unable to load {s}: {s}", .{ + file.sub_file_path, @errorName(err), + }); + return owner_decl.srcLoc(mod); + }; + const node = owner_decl.relativeToNodeIndex(0); + var buf: [2]Ast.Node.Index = undefined; + if (tree.fullContainerDecl(&buf, node)) |container_decl| { + return queryFieldSrc(tree.*, query, file, container_decl); + } else { + // This type was generated using @Type + return owner_decl.srcLoc(mod); + } +} diff --git a/src/Sema.zig b/src/Sema.zig index e9bf66565e43..b94f995b46b5 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2096,7 +2096,7 @@ fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazyS errdefer msg.destroy(sema.gpa); const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg; - const default_value_src = struct_ty.fieldSrcLoc(mod, .{ + const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{ .index = field_index, .range = .value, }); @@ -2875,50 +2875,28 @@ fn zirEnumDecl( break :blk decls_len; } else 0; - var done = false; - - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer if (!done) new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the enum type gains an + // InternPool index. - const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull); - enum_ty_payload.* = .{ - .base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); + var done = false; const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = enum_val, + .val = undefined, }, small.name_strategy, "enum", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer if (!done) mod.abortAnonDecl(new_decl_index); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .tag_ty = Type.null, - .tag_ty_inferred = true, - .fields = .{}, - .values = .{}, - .namespace = try mod.createNamespace(.{ - .parent = block.namespace.toOptional(), - .ty = enum_ty, - .file_scope = block.getFileScope(mod), - }), - }; - - try new_decl.finalizeNewArena(&new_decl_arena); - const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); - done = true; - - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = new_decl.value_arena.?.acquire(gpa, &decl_arena); - defer new_decl.value_arena.?.release(&decl_arena); + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer if (!done) mod.destroyNamespace(new_namespace_index); - extra_index = try mod.scanNamespace(enum_obj.namespace, extra_index, decls_len, new_decl); + extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; @@ -2927,7 +2905,31 @@ fn zirEnumDecl( const body_end = extra_index; extra_index += bit_bags_count; - { + const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { + if (bag != 0) break true; + } else false; + + const incomplete_enum = try 
mod.intern_pool.getIncompleteEnum(gpa, .{ + .decl = new_decl_index, + .namespace = new_namespace_index.toOptional(), + .fields_len = fields_len, + .has_values = any_values, + .tag_mode = if (small.nonexhaustive) + .nonexhaustive + else if (tag_type_ref == .none) + .auto + else + .explicit, + }); + errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index); + + new_decl.val = incomplete_enum.index.toValue(); + new_namespace.ty = incomplete_enum.index.toType(); + + const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); + done = true; + + const int_tag_ty = ty: { // We create a block for the field type instructions because they // may need to reference Decls from inside the enum namespace. // Within the field type, default value, and alignment expressions, the "owner decl" @@ -2957,7 +2959,7 @@ fn zirEnumDecl( .parent = null, .sema = sema, .src_decl = new_decl_index, - .namespace = enum_obj.namespace, + .namespace = new_namespace_index, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -2976,35 +2978,22 @@ fn zirEnumDecl( if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); } - enum_obj.tag_ty = try ty.copy(decl_arena_allocator); - enum_obj.tag_ty_inferred = false; + incomplete_enum.setTagType(&mod.intern_pool, ty.ip_index); + break :ty ty; } else if (fields_len == 0) { - enum_obj.tag_ty = try mod.intType(.unsigned, 0); - enum_obj.tag_ty_inferred = true; + break :ty try mod.intType(.unsigned, 0); } else { const bits = std.math.log2_int_ceil(usize, fields_len); - enum_obj.tag_ty = try mod.intType(.unsigned, bits); - enum_obj.tag_ty_inferred = true; + break :ty try mod.intType(.unsigned, bits); } - } + }; - if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag(mod) != .ComptimeInt) { - if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(mod)) { + if (small.nonexhaustive and int_tag_ty.ip_index != .comptime_int_type) { + if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } } - try enum_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); - const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { - if (bag != 0) break true; - } else false; - if (any_values) { - try enum_obj.values.ensureTotalCapacityContext(decl_arena_allocator, fields_len, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - } - var bit_bag_index: usize = body_end; var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; @@ -3023,15 +3012,12 @@ fn zirEnumDecl( // doc comment extra_index += 1; - // This string needs to outlive the ZIR code. 
- const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - - const gop_field = enum_obj.fields.getOrPutAssumeCapacity(field_name); - if (gop_field.found_existing) { - const field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_field.index }).lazy; + const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir); + if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| { + const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name}); + const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name_zir}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other field here", .{}); break :msg msg; @@ -3045,7 +3031,7 @@ fn zirEnumDecl( const tag_inst = try sema.resolveInst(tag_val_ref); const tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { error.NeededSourceLocation => { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, }).lazy; @@ -3055,19 +3041,14 @@ fn zirEnumDecl( else => |e| return e, }; last_tag_val = tag_val; - const copied_tag_val = try tag_val.copy(decl_arena_allocator); - const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - if (gop_val.found_existing) { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| { + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_val.index }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{tag_val.fmtValue(enum_obj.tag_ty, sema.mod)}); + const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{tag_val.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -3076,20 +3057,15 @@ fn zirEnumDecl( } } else if (any_values) { const tag_val = if (last_tag_val) |val| - try sema.intAdd(val, try mod.intValue(enum_obj.tag_ty, 1), enum_obj.tag_ty) + try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty) else - try mod.intValue(enum_obj.tag_ty, 0); + try mod.intValue(int_tag_ty, 0); last_tag_val = tag_val; - const copied_tag_val = try tag_val.copy(decl_arena_allocator); - const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - if (gop_val.found_existing) { - const field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_val.index }).lazy; + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| { + const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index 
}).lazy; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{tag_val.fmtValue(enum_obj.tag_ty, sema.mod)}); + const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{tag_val.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -3097,16 +3073,16 @@ fn zirEnumDecl( return sema.failWithOwnedErrorMsg(msg); } } else { - last_tag_val = try mod.intValue(enum_obj.tag_ty, field_i); + last_tag_val = try mod.intValue(int_tag_ty, field_i); } - if (!(try sema.intFitsInType(last_tag_val.?, enum_obj.tag_ty, null))) { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) { + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = if (has_tag_value) .value else .name, }).lazy; const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{ - last_tag_val.?.fmtValue(enum_obj.tag_ty, mod), enum_obj.tag_ty.fmt(mod), + last_tag_val.?.fmtValue(int_tag_ty, mod), int_tag_ty.fmt(mod), }); return sema.failWithOwnedErrorMsg(msg); } @@ -4356,7 +4332,7 @@ fn validateUnionInit( } const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); if (init_val) |val| { @@ -8334,7 +8310,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); if (try sema.resolveMaybeUndefVal(operand)) |int_val| { - if (dest_ty.isNonexhaustiveEnum()) { + if (dest_ty.isNonexhaustiveEnum(mod)) { const int_tag_ty = try dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { return sema.addConstant(dest_ty, int_val); @@ -8383,7 +8359,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.requireRuntimeBlock(block, src, operand_src); const result = try block.addTyOp(.intcast, dest_ty, operand); - if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum() and + if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and sema.mod.backendSupportsFeature(.is_named_enum_value)) { const ok = try block.addUnOp(.is_named_enum_value, result); @@ -10518,7 +10494,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var else_error_ty: ?Type = null; // Validate usage of '_' prongs. 
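+ // A '_' prong is only accepted when the operand is a non-exhaustive enum,
+ // and not one obtained by switching on a union's tag.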
- if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { + if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { const msg = msg: { const msg = try sema.errMsg( block, @@ -10543,8 +10519,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError switch (operand_ty.zigTypeTag(mod)) { .Union => unreachable, // handled in zirSwitchCond .Enum => { - seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); - empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(); + seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount(mod)); + empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod); @memset(seen_enum_fields, null); // `range_set` is used for non-exhaustive enum values that do not correspond to any tags. @@ -10599,7 +10575,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } else true; if (special_prong == .@"else") { - if (all_tags_handled and !operand_ty.isNonexhaustiveEnum()) return sema.fail( + if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", @@ -10617,7 +10593,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError for (seen_enum_fields, 0..) |seen_src, i| { if (seen_src != null) continue; - const field_name = operand_ty.enumFieldName(i); + const field_name = operand_ty.enumFieldName(i, mod); try sema.addFieldErrNote( operand_ty, i, @@ -10635,7 +10611,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum() and !union_originally) { + } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) { return sema.fail( block, src, @@ -11159,7 +11135,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return Air.Inst.Ref.unreachable_value; } if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and - (!operand_ty.isNonexhaustiveEnum() or union_originally)) + (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { try sema.zirDbgStmt(block, cond_dbg_node_index); const ok = try block.addUnOp(.is_named_enum_value, operand); @@ -11489,7 +11465,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var emit_bb = false; if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) { .Enum => { - if (operand_ty.isNonexhaustiveEnum() and !union_originally) { + if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ operand_ty.fmt(mod), }); @@ -11629,7 +11605,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError case_block.inline_case_capture = .none; if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and - operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) + operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { try sema.zirDbgStmt(&case_block, cond_dbg_node_index); const ok = try case_block.addUnOp(.is_named_enum_value, 
operand); @@ -12081,7 +12057,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :hf switch (ty.zigTypeTag(mod)) { .Struct => ty.structFields(mod).contains(field_name), .Union => ty.unionFields(mod).contains(field_name), - .Enum => ty.enumFields().contains(field_name), + .Enum => ty.enumFieldIndex(field_name, mod) != null, .Array => mem.eql(u8, field_name, "len"), else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ ty.fmt(sema.mod), @@ -16300,9 +16276,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Enum => { // TODO: look into memoizing this result. - const int_tag_ty = try ty.intTagType(mod); + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum()); + const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive); var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); @@ -16320,25 +16296,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; - const enum_fields = ty.enumFields(); - const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_fields.count()); + const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len); for (enum_field_vals, 0..) |*field_val, i| { - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, i), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); - - const int_val = try tag_val.enumToInt(ty, mod); - - const name = enum_fields.keys()[i]; + const name_ip = enum_type.names[i]; + const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -16350,7 +16318,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // value: comptime_int, - int_val, + try mod.intValue(Type.comptime_int, i), }; field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields); } @@ -16370,12 +16338,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :v try Value.Tag.decl_ref.create(sema.arena, new_decl); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(mod)); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, enum_type.namespace); const field_values = try sema.arena.create([4]Value); field_values.* = .{ // tag_type: type, - try Value.Tag.ty.create(sema.arena, int_tag_ty), + enum_type.tag_ty.toValue(), // fields: []const EnumField, fields_val, // decls: []const Declaration, @@ -16468,7 +16436,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace(mod)); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); const enum_tag_ty_val = if (union_ty.unionTagType(mod)) |tag_ty| v: { const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty); @@ -16631,7 +16599,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace(mod)); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); const backing_integer_val = blk: { if (layout == .Packed) { @@ -16674,7 +16642,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // TODO: look into memoizing this result. 
const opaque_ty = try sema.resolveTypeFields(ty); - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespace(mod)); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod)); const field_values = try sema.arena.create([1]Value); field_values.* = .{ @@ -16700,7 +16668,7 @@ fn typeInfoDecls( block: *Block, src: LazySrcLoc, type_info_ty: Type, - opt_namespace: ?*Module.Namespace, + opt_namespace: Module.Namespace.OptionalIndex, ) CompileError!Value { const mod = sema.mod; var decls_anon_decl = try block.startAnonDecl(); @@ -16726,8 +16694,9 @@ fn typeInfoDecls( var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa); defer seen_namespaces.deinit(); - if (opt_namespace) |some| { - try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), some, &decl_vals, &seen_namespaces); + if (opt_namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), namespace, &decl_vals, &seen_namespaces); } const new_decl = try decls_anon_decl.finish( @@ -17896,7 +17865,7 @@ fn unionInit( if (try sema.resolveMaybeUndefVal(init)) |init_val| { const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, @@ -17997,7 +17966,7 @@ fn zirStructInit( const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); const init_inst = try sema.resolveInst(item.data.init); @@ -18754,7 +18723,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air operand_ty.fmt(mod), }), }; - if (enum_ty.enumFieldCount() == 0) { + if (enum_ty.enumFieldCount(mod) == 0) { // TODO I don't think this is the correct way to handle this but // it prevents a crash. 
return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{ @@ -18776,7 +18745,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }; - const field_name = enum_ty.enumFieldName(field_index); + const field_name = enum_ty.enumFieldName(field_index, mod); return sema.addStrLit(block, field_name); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -19081,63 +19050,41 @@ fn zirReify( return sema.fail(block, src, "reified enums must have no decls", .{}); } - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + const int_tag_ty = tag_type_val.toType(); + if (int_tag_ty.zigTypeTag(mod) != .Int) { + return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); + } + + // Because these things each reference each other, `undefined` + // placeholders are used before being set after the enum type gains + // an InternPool index. - // Define our empty enum decl - const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull); - enum_ty_payload.* = .{ - .base = .{ - .tag = if (!is_exhaustive_val.toBool(mod)) - .enum_nonexhaustive - else - .enum_full, - }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = enum_val, + .val = undefined, }, name_strategy, "enum", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .tag_ty = Type.null, - .tag_ty_inferred = false, - .fields = .{}, - .values = .{}, - .namespace = try mod.createNamespace(.{ - .parent = block.namespace.toOptional(), - .ty = enum_ty, - .file_scope = block.getFileScope(mod), - }), - }; - - // Enum tag type - const int_tag_ty = try tag_type_val.toType().copy(new_decl_arena_allocator); - - if (int_tag_ty.zigTypeTag(mod) != .Int) { - return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); - } - enum_obj.tag_ty = int_tag_ty; - - // Fields - const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); - try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ - .ty = enum_obj.tag_ty, - .mod = mod, + // Define our empty enum decl + const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod))); + const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{ + .decl = new_decl_index, + .namespace = .none, + .fields_len = fields_len, + .has_values = true, + .tag_mode = if (!is_exhaustive_val.toBool(mod)) + .nonexhaustive + else + .explicit, + .tag_ty = int_tag_ty.ip_index, }); + errdefer mod.intern_pool.remove(incomplete_enum.index); - var field_i: usize = 0; - while (field_i < fields_len) : (field_i += 1) { + new_decl.val = incomplete_enum.index.toValue(); + + for (0..fields_len) |field_i| { const elem_val = try fields_val.elemValue(mod, field_i); const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here @@ -19148,39 +19095,36 @@ fn zirReify( const 
field_name = try name_val.toAllocatedBytes( Type.const_slice_u8, - new_decl_arena_allocator, + sema.arena, mod, ); + const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); - if (!try sema.intFitsInType(value_val, enum_obj.tag_ty, null)) { + if (!try sema.intFitsInType(value_val, int_tag_ty, null)) { // TODO: better source location return sema.fail(block, src, "field '{s}' with enumeration value '{}' is too large for backing int type '{}'", .{ field_name, value_val.fmtValue(Type.comptime_int, mod), - enum_obj.tag_ty.fmt(mod), + int_tag_ty.fmt(mod), }); } - const gop_field = enum_obj.fields.getOrPutAssumeCapacity(field_name); - if (gop_field.found_existing) { + if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name_ip)) |other_index| { const msg = msg: { const msg = try sema.errMsg(block, src, "duplicate enum field '{s}'", .{field_name}); errdefer msg.destroy(gpa); + _ = other_index; // TODO: this note is incorrect try sema.errNote(block, src, msg, "other field here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - const copied_tag_val = try value_val.copy(new_decl_arena_allocator); - const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - if (gop_val.found_existing) { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, value_val.ip_index)) |other| { const msg = msg: { const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)}); errdefer msg.destroy(gpa); + _ = other; // TODO: this note is incorrect try sema.errNote(block, src, msg, "other enum tag value here", .{}); break :msg msg; }; @@ -19188,7 +19132,6 @@ fn zirReify( } } - try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); }, .Opaque => { @@ -19307,26 +19250,29 @@ fn zirReify( new_namespace.ty = union_ty.toType(); // Tag type - var tag_ty_field_names: ?Module.EnumFull.NameMap = null; - var enum_field_names: ?*Module.EnumNumbered.NameMap = null; const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); + var explicit_tags_seen: []bool = &.{}; + var explicit_enum_info: ?InternPool.Key.EnumType = null; + var enum_field_names: []InternPool.NullTerminatedString = &.{}; if (tag_type_val.optionalValue(mod)) |payload_val| { - union_obj.tag_ty = try payload_val.toType().copy(new_decl_arena_allocator); + union_obj.tag_ty = payload_val.toType(); - if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { - return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}); - } - tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena); + const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { + .enum_type => |x| x, + else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}), + }; + + explicit_enum_info = enum_type; + explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); + @memset(explicit_tags_seen, false); } else { - union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, fields_len, null); - enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields; + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } // Fields try union_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - var i: usize = 0; - while (i < fields_len) : (i += 1) { + for (0..fields_len) |i| { const elem_val = try fields_val.elemValue(mod, i); const 
field_struct_val = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here @@ -19343,13 +19289,14 @@ fn zirReify( mod, ); - if (enum_field_names) |set| { - set.putAssumeCapacity(field_name, {}); + const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + + if (enum_field_names.len != 0) { + enum_field_names[i] = field_name_ip; } - if (tag_ty_field_names) |*names| { - const enum_has_field = names.orderedRemove(field_name); - if (!enum_has_field) { + if (explicit_enum_info) |tag_info| { + const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) }); errdefer msg.destroy(gpa); @@ -19357,7 +19304,11 @@ fn zirReify( break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } + }; + // No check for duplicate because the check already happened in order + // to create the enum type in the first place. + assert(!explicit_tags_seen[enum_index]); + explicit_tags_seen[enum_index] = true; } const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); @@ -19409,22 +19360,26 @@ fn zirReify( } } - if (tag_ty_field_names) |names| { - if (names.count() > 0) { + if (explicit_enum_info) |tag_info| { + if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{}); errdefer msg.destroy(gpa); const enum_ty = union_obj.tag_ty; - for (names.keys()) |field_name| { - const field_index = enum_ty.enumFieldIndex(field_name).?; - try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{field_name}); + for (tag_info.names, 0..) |field_name, field_index| { + if (explicit_tags_seen[field_index]) continue; + try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ + mod.intern_pool.stringToSlice(field_name), + }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + } else { + union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, null); } try new_decl.finalizeNewArena(&new_decl_arena); @@ -23450,7 +23405,7 @@ fn explainWhyTypeIsComptimeInner( if (mod.typeToStruct(ty)) |struct_obj| { for (struct_obj.fields.values(), 0..) |field, i| { - const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{ + const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = i, .range = .type, }); @@ -23469,7 +23424,7 @@ fn explainWhyTypeIsComptimeInner( if (mod.typeToUnion(ty)) |union_obj| { for (union_obj.fields.values(), 0..) 
|field, i| { - const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{ + const field_src_loc = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = i, .range = .type, }); @@ -24168,7 +24123,7 @@ fn fieldVal( } const union_ty = try sema.resolveTypeFields(child_type); if (union_ty.unionTagType(mod)) |enum_ty| { - if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| { + if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index = @intCast(u32, field_index_usize); return sema.addConstant( enum_ty, @@ -24184,7 +24139,7 @@ fn fieldVal( return inst; } } - const field_index_usize = child_type.enumFieldIndex(field_name) orelse + const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index = @intCast(u32, field_index_usize); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index); @@ -24382,7 +24337,7 @@ fn fieldPtr( } const union_ty = try sema.resolveTypeFields(child_type); if (union_ty.unionTagType(mod)) |enum_ty| { - if (enum_ty.enumFieldIndex(field_name)) |field_index| { + if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -24401,7 +24356,7 @@ fn fieldPtr( return inst; } } - const field_index = child_type.enumFieldIndex(field_name) orelse { + const field_index = child_type.enumFieldIndex(field_name, mod) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32 = @intCast(u32, field_index); @@ -24996,7 +24951,7 @@ fn unionFieldPtr( .@"volatile" = union_ptr_ty.isVolatilePtr(mod), .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod), }); - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { @@ -25028,7 +24983,7 @@ fn unionFieldPtr( if (!tag_matches) { const msg = msg: { const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; - const active_field_name = union_obj.tag_ty.enumFieldName(active_index); + const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); @@ -25083,7 +25038,7 @@ fn unionFieldVal( const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { if (union_val.isUndef()) return sema.addConstUndef(field.ty); @@ -25102,7 +25057,7 @@ fn unionFieldVal( } else { const msg = msg: { const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; - const active_field_name = union_obj.tag_ty.enumFieldName(active_index); + const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, 
active_field_name }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); @@ -26191,7 +26146,7 @@ fn coerceExtra( // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); const bytes = val.castTag(.enum_literal).?.data; - const field_index = dest_ty.enumFieldIndex(bytes) orelse { + const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { const msg = msg: { const msg = try sema.errMsg( block, @@ -28707,7 +28662,7 @@ fn coerceEnumToUnion( try sema.requireRuntimeBlock(block, inst_src, null); - if (tag_ty.isNonexhaustiveEnum()) { + if (tag_ty.isNonexhaustiveEnum(mod)) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{ union_ty.fmt(sema.mod), @@ -31605,7 +31560,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_single, .error_set_inferred, .error_set_merged, - .enum_simple, => false, .function => true, @@ -31646,14 +31600,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const child_ty = ty.castTag(.anyframe_T).?.data; return sema.resolveTypeRequiresComptime(child_ty); }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, @@ -31760,7 +31706,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .opaque_type => false, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), // values, not types .un => unreachable, @@ -32284,12 +32230,12 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { - const field_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; + const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy; const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name}); errdefer msg.destroy(gpa); const prev_field_index = struct_obj.fields.getIndex(field_name).?; - const prev_field_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }); + const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index }); try sema.mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); break :msg msg; @@ -32325,7 +32271,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (zir_field.type_ref != .none) { break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32341,7 +32287,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = 
struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32360,7 +32306,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32374,7 +32320,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32388,7 +32334,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }); @@ -32403,7 +32349,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void return sema.failWithOwnedErrorMsg(msg); } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }); @@ -32424,7 +32370,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .alignment, }).lazy; @@ -32452,7 +32398,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const field = &struct_obj.fields.values()[field_i]; const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const init_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32462,7 +32408,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void else => |e| return e, }; const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { - const init_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32573,9 +32519,11 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); var int_tag_ty: Type = undefined; - var enum_field_names: ?*Module.EnumNumbered.NameMap = null; - var enum_value_map: ?*Module.EnumNumbered.ValueMap = null; - var tag_ty_field_names: ?Module.EnumFull.NameMap = null; + var enum_field_names: []InternPool.NullTerminatedString = &.{}; + var enum_field_vals: []InternPool.Index = &.{}; + var enum_field_vals_map: std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false) = .{}; + var explicit_tags_seen: []bool = &.{}; + var explicit_enum_info: ?InternPool.Key.EnumType = null; 
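+ // When an explicit enum tag type is provided, `explicit_tags_seen` gets one
+ // flag per enum field; each union field marks its flag, so missing and
+ // extraneous fields can be diagnosed without cloning the enum's name map.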
if (tag_type_ref != .none) { const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref); @@ -32601,27 +32549,26 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { return sema.failWithOwnedErrorMsg(msg); } } - union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, fields_len, provided_ty, union_obj); - const enum_obj = union_obj.tag_ty.castTag(.enum_numbered).?.data; - enum_field_names = &enum_obj.fields; - enum_value_map = &enum_obj.values; + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); + enum_field_vals = try sema.arena.alloc(InternPool.Index, fields_len); } else { // The provided type is the enum tag type. - union_obj.tag_ty = try provided_ty.copy(decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { - return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}); - } + union_obj.tag_ty = provided_ty; + const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { + .enum_type => |x| x, + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}), + }; // The fields of the union must match the enum exactly. - // Store a copy of the enum field names so we can check for - // missing or extraneous fields later. - tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena); + // A flag per field is used to check for missing and extraneous fields. + explicit_enum_info = enum_type; + explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); + @memset(explicit_tags_seen, false); } } else { // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis // purposes, we still auto-generate an enum tag type the same way. That the union is // untagged is represented by the Type tag (union vs union_tagged). - union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, fields_len, union_obj); - enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields; + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } if (fields_len == 0) { @@ -32675,11 +32622,11 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk try sema.resolveInst(tag_ref); } else .none; - if (enum_value_map) |map| { + if (enum_field_vals.len != 0) { const copied_val = if (tag_ref != .none) blk: { const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const val_src = union_obj.fieldSrcLoc(sema.mod, .{ + const val_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32690,25 +32637,24 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; last_tag_val = val; - // This puts the memory into the union arena, not the enum arena, but - // it is OK since they share the same lifetime. 
- break :blk try val.copy(decl_arena_allocator); + break :blk val; } else blk: { const val = if (last_tag_val) |val| - try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty) + try sema.intAdd(val, Value.one_comptime_int, int_tag_ty) else try mod.intValue(int_tag_ty, 0); last_tag_val = val; - break :blk try val.copy(decl_arena_allocator); + break :blk val; }; - const gop = map.getOrPutAssumeCapacityContext(copied_val, .{ + enum_field_vals[field_i] = copied_val.ip_index; + const gop = enum_field_vals_map.getOrPutAssumeCapacityContext(copied_val, .{ .ty = int_tag_ty, .mod = mod, }); if (gop.found_existing) { - const field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = gop.index }).lazy; + const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy; const msg = msg: { const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); @@ -32721,8 +32667,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { // This string needs to outlive the ZIR code. const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - if (enum_field_names) |set| { - set.putAssumeCapacity(field_name, {}); + const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + if (enum_field_names.len != 0) { + enum_field_names[field_i] = field_name_ip; } const field_ty: Type = if (!has_type) @@ -32732,7 +32679,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { else sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32749,12 +32696,12 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { - const field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; + const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name}); errdefer msg.destroy(gpa); const prev_field_index = union_obj.fields.getIndex(field_name).?; - const prev_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }).lazy; + const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy; try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; @@ -32762,26 +32709,31 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { return sema.failWithOwnedErrorMsg(msg); } - if (tag_ty_field_names) |*names| { - const enum_has_field = names.orderedRemove(field_name); - if (!enum_has_field) { + if (explicit_enum_info) |tag_info| { + const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; - const msg = try 
sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ + field_name, union_obj.tag_ty.fmt(sema.mod), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } + }; + // No check for duplicate because the check already happened in order + // to create the enum type in the first place. + assert(!explicit_tags_seen[enum_index]); + explicit_tags_seen[enum_index] = true; } if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32795,7 +32747,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }); @@ -32810,7 +32762,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { return sema.failWithOwnedErrorMsg(msg); } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }); @@ -32833,7 +32785,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { if (align_ref != .none) { gop.value_ptr.abi_align = sema.resolveAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = union_obj.fieldSrcLoc(sema.mod, .{ + const align_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .alignment, }).lazy; @@ -32847,22 +32799,28 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (tag_ty_field_names) |names| { - if (names.count() > 0) { + if (explicit_enum_info) |tag_info| { + if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{}); errdefer msg.destroy(sema.gpa); const enum_ty = union_obj.tag_ty; - for (names.keys()) |field_name| { - const field_index = enum_ty.enumFieldIndex(field_name).?; - try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{field_name}); + for (tag_info.names, 0..) 
|field_name, field_index| { + if (explicit_tags_seen[field_index]) continue; + try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ + mod.intern_pool.stringToSlice(field_name), + }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + } else if (enum_field_vals.len != 0) { + union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals, union_obj); + } else { + union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_obj); } } @@ -32874,25 +32832,12 @@ fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Ty fn generateUnionTagTypeNumbered( sema: *Sema, block: *Block, - fields_len: u32, - int_ty: Type, + enum_field_names: []const InternPool.NullTerminatedString, + enum_field_vals: []const InternPool.Index, union_obj: *Module.Union, ) !Type { const mod = sema.mod; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered); - enum_ty_payload.* = .{ - .base = .{ .tag = .enum_numbered }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); - const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); @@ -32903,53 +32848,45 @@ fn generateUnionTagTypeNumbered( }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, - .val = enum_val, + .val = undefined, }, name); - sema.mod.declPtr(new_decl_index).name_fully_qualified = true; - const new_decl = mod.declPtr(new_decl_index); + new_decl.name_fully_qualified = true; new_decl.owns_tv = true; new_decl.name_fully_qualified = true; errdefer mod.abortAnonDecl(new_decl_index); - const copied_int_ty = try int_ty.copy(new_decl_arena_allocator); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .tag_ty = copied_int_ty, - .fields = .{}, - .values = .{}, - }; - // Here we pre-allocate the maps using the decl arena. 
- try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ - .ty = copied_int_ty, - .mod = mod, - }); - try new_decl.finalizeNewArena(&new_decl_arena); - return enum_ty; -} + const enum_ty = try mod.intern(.{ .enum_type = .{ + .decl = new_decl_index, + .namespace = .none, + .tag_ty = if (enum_field_vals.len == 0) + .noreturn_type + else + mod.intern_pool.typeOf(enum_field_vals[0]), + .names = enum_field_names, + .values = enum_field_vals, + .tag_mode = .explicit, + } }); + errdefer mod.intern_pool.remove(enum_ty); -fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize, maybe_union_obj: ?*Module.Union) !Type { - const mod = sema.mod; + new_decl.val = enum_ty.toValue(); - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + return enum_ty.toType(); +} - const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple); - enum_ty_payload.* = .{ - .base = .{ .tag = .enum_simple }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); +fn generateUnionTagTypeSimple( + sema: *Sema, + block: *Block, + enum_field_names: []const InternPool.NullTerminatedString, + maybe_union_obj: ?*Module.Union, +) !Type { + const mod = sema.mod; const new_decl_index = new_decl_index: { const union_obj = maybe_union_obj orelse { break :new_decl_index try mod.createAnonymousDecl(block, .{ .ty = Type.type, - .val = enum_val, + .val = undefined, }); }; const src_decl = mod.declPtr(block.src_decl); @@ -32962,24 +32899,31 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize, may }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, - .val = enum_val, + .val = undefined, }, name); - sema.mod.declPtr(new_decl_index).name_fully_qualified = true; + mod.declPtr(new_decl_index).name_fully_qualified = true; break :new_decl_index new_decl_index; }; + const enum_ty = try mod.intern(.{ .enum_type = .{ + .decl = new_decl_index, + .namespace = .none, + .tag_ty = if (enum_field_names.len == 0) + .noreturn_type + else + (try mod.smallestUnsignedInt(enum_field_names.len - 1)).ip_index, + .names = enum_field_names, + .values = &.{}, + .tag_mode = .auto, + } }); + errdefer mod.intern_pool.remove(enum_ty); + const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; + new_decl.val = enum_ty.toValue(); errdefer mod.abortAnonDecl(new_decl_index); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .fields = .{}, - }; - // Here we pre-allocate the maps using the decl arena. - try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - try new_decl.finalizeNewArena(&new_decl_arena); - return enum_ty; + return enum_ty.toType(); } fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { @@ -33098,57 +33042,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return Value.empty_struct; }, - .enum_numbered => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. 
- if (!(try sema.typeHasRuntimeBits(enum_obj.tag_ty))) { - return null; - } - if (enum_obj.fields.count() == 1) { - if (enum_obj.values.count() == 0) { - return Value.enum_field_0; // auto-numbered - } else { - return enum_obj.values.keys()[0]; - } - } else { - return null; - } - }, - .enum_full => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_full).?.data; - if (!(try sema.typeHasRuntimeBits(enum_obj.tag_ty))) { - return null; - } - switch (enum_obj.fields.count()) { - 0 => return Value.@"unreachable", - 1 => if (enum_obj.values.count() == 0) { - return Value.enum_field_0; // auto-numbered - } else { - return enum_obj.values.keys()[0]; - }, - else => return null, - } - }, - .enum_simple => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_simple = resolved_ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.@"unreachable", - 1 => return Value.enum_field_0, - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return Value.enum_field_0; - } else { - return null; - } - }, - .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); @@ -33295,7 +33188,28 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return only.toValue(); }, .opaque_type => null, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty != .comptime_int_type and + !(try sema.typeHasRuntimeBits(enum_type.tag_ty.toType()))) + { + return Value.enum_field_0; + } else { + return null; + } + }, + .auto, .explicit => switch (enum_type.names.len) { + 0 => return Value.@"unreachable", + 1 => { + if (enum_type.values.len == 0) { + return Value.enum_field_0; // auto-numbered + } else { + return enum_type.values[0].toValue(); + } + }, + else => return null, + }, + }, // values, not types .un => unreachable, @@ -33701,7 +33615,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_single, .error_set_inferred, .error_set_merged, - .enum_simple, => false, .function => true, @@ -33742,14 +33655,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const child_ty = ty.castTag(.anyframe_T).?.data; return sema.typeRequiresComptime(child_ty); }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.typeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.typeRequiresComptime(tag_ty); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return false, @@ -33865,7 +33770,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .opaque_type => false, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()), // values, not types .un => unreachable, @@ -34435,42 +34340,19 @@ fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { /// Asserts the type is an enum. 
fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { const mod = sema.mod; - switch (ty.tag()) { - .enum_nonexhaustive => unreachable, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - const tag_ty = enum_full.tag_ty; - if (enum_full.values.count() == 0) { - return sema.intInRange(tag_ty, int, enum_full.fields.count()); - } else { - return enum_full.values.containsContext(int, .{ - .ty = tag_ty, - .mod = sema.mod, - }); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - const tag_ty = enum_obj.tag_ty; - if (enum_obj.values.count() == 0) { - return sema.intInRange(tag_ty, int, enum_obj.fields.count()); - } else { - return enum_obj.values.containsContext(int, .{ - .ty = tag_ty, - .mod = sema.mod, - }); - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const fields_len = enum_simple.fields.count(); - const bits = std.math.log2_int_ceil(usize, fields_len); - const tag_ty = try mod.intType(.unsigned, bits); - return sema.intInRange(tag_ty, int, fields_len); - }, - - else => unreachable, + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + assert(enum_type.tag_mode != .nonexhaustive); + if (enum_type.values.len == 0) { + // auto-numbered + return sema.intInRange(enum_type.tag_ty.toType(), int, enum_type.names.len); } + + // The `tagValueIndex` function call below relies on the type being the integer tag type. + // `getCoerced` assumes the value will fit the new type. + if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false; + const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty); + + return enum_type.tagValueIndex(mod.intern_pool, int_coerced) != null; } fn intAddWithOverflow( diff --git a/src/TypedValue.zig b/src/TypedValue.zig index cf9888f35703..5f295e42f30e 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -198,7 +198,7 @@ pub fn print( .empty_array => return writer.writeAll(".{}"), .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), .enum_field_index => { - return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)}); + return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data, mod)}); }, .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index c1409e49776c..a2f4f81053ee 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3101,24 +3101,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .Enum => { if (val.castTag(.enum_field_index)) |field_index| { - switch (ty.tag()) { - .enum_simple => return WValue{ .imm32 = field_index.data }, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return func.lowerConstant(tag_val, enum_full.tag_ty); - } else { - return WValue{ .imm32 = field_index.data }; - } - }, - .enum_numbered => { - const index = field_index.data; - const enum_data = ty.castTag(.enum_numbered).?.data; - const enum_val = enum_data.values.keys()[index]; - return func.lowerConstant(enum_val, enum_data.tag_ty); - }, - else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}), + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; 
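+                // An empty `values` slice means the enum is auto-numbered, so
+                // the field index is itself the integer tag value.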
+ if (enum_type.values.len != 0) { + const tag_val = enum_type.values[field_index.data]; + return func.lowerConstant(tag_val.toValue(), enum_type.tag_ty.toType()); + } else { + return WValue{ .imm32 = field_index.data }; } } else { const int_tag_ty = try ty.intTagType(mod); @@ -3240,21 +3228,12 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) !i32 { switch (ty.zigTypeTag(mod)) { .Enum => { if (val.castTag(.enum_field_index)) |field_index| { - switch (ty.tag()) { - .enum_simple => return @bitCast(i32, field_index.data), - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return func.valueAsI32(tag_val, enum_full.tag_ty); - } else return @bitCast(i32, field_index.data); - }, - .enum_numbered => { - const index = field_index.data; - const enum_data = ty.castTag(.enum_numbered).?.data; - return func.valueAsI32(enum_data.values.keys()[index], enum_data.tag_ty); - }, - else => unreachable, + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + const tag_val = enum_type.values[field_index.data]; + return func.valueAsI32(tag_val.toValue(), enum_type.tag_ty.toType()); + } else { + return @bitCast(i32, field_index.data); } } else { const int_tag_ty = try ty.intTagType(mod); @@ -6836,7 +6815,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse. // generate an if-else chain for each tag value as well as constant. - for (enum_ty.enumFields().keys(), 0..) |tag_name, field_index| { + for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index| { + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); // for each tag name, create an unnamed const, // and then get a pointer to its value. 
const name_ty = try mod.arrayType(.{ @@ -6846,7 +6826,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { }); const string_bytes = &mod.string_literal_bytes; try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{ + const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, @as([]const u8, tag_name), Module.StringLiteralAdapter{ .bytes = string_bytes, }, Module.StringLiteralContext{ .bytes = string_bytes, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 7b93ff205974..72f416ca8763 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2016,7 +2016,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { const ret_reg = param_regs[0]; const enum_mcv = MCValue{ .register = param_regs[1] }; - var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount()); + var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount(mod)); defer self.gpa.free(exitlude_jump_relocs); const data_reg = try self.register_manager.allocReg(null, gp); @@ -2027,9 +2027,10 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { var data_off: i32 = 0; for ( exitlude_jump_relocs, - enum_ty.enumFields().keys(), + enum_ty.enumFields(mod), 0.., - ) |*exitlude_jump_reloc, tag_name, index| { + ) |*exitlude_jump_reloc, tag_name_ip, index| { + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = @intCast(u32, index), @@ -11413,7 +11414,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const union_obj = mod.typeToUnion(union_ty).?; const field_name = union_obj.fields.keys()[extra.field_index]; const tag_ty = union_obj.tag_ty; - const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); + const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; const tag_val = Value.initPayload(&tag_pl.base); const tag_int_val = try tag_val.enumToInt(tag_ty, mod); diff --git a/src/codegen.zig b/src/codegen.zig index 5c022392bfa2..148a69016ac1 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -156,7 +156,8 @@ pub fn generateLazySymbol( return Result.ok; } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; - for (lazy_sym.ty.enumFields().keys()) |tag_name| { + for (lazy_sym.ty.enumFields(mod)) |tag_name_ip| { + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); try code.ensureUnusedCapacity(tag_name.len + 1); code.appendSliceAssumeCapacity(tag_name); code.appendAssumeCapacity(0); @@ -1229,26 +1230,15 @@ pub fn genTypedValue( }, .Enum => { if (typed_value.val.castTag(.enum_field_index)) |field_index| { - switch (typed_value.ty.tag()) { - .enum_simple => { - return GenResult.mcv(.{ .immediate = field_index.data }); - }, - .enum_numbered, .enum_full, .enum_nonexhaustive => { - const enum_values = if (typed_value.ty.castTag(.enum_numbered)) |pl| - pl.data.values - else - typed_value.ty.cast(Type.Payload.EnumFull).?.data.values; - if (enum_values.count() != 0) { - const tag_val = enum_values.keys()[field_index.data]; - return genTypedValue(bin_file, src_loc, .{ - .ty = try typed_value.ty.intTagType(mod), - .val = tag_val, - }, owner_decl_index); - } else { - return GenResult.mcv(.{ .immediate = field_index.data }); - } - }, - 
else => unreachable, + const enum_type = mod.intern_pool.indexToKey(typed_value.ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + const tag_val = enum_type.values[field_index.data]; + return genTypedValue(bin_file, src_loc, .{ + .ty = enum_type.tag_ty.toType(), + .val = tag_val.toValue(), + }, owner_decl_index); + } else { + return GenResult.mcv(.{ .immediate = field_index.data }); } } else { const int_tag_ty = try typed_value.ty.intTagType(mod); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 872bdb94d385..a67d39471ad2 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1288,27 +1288,12 @@ pub const DeclGen = struct { switch (val.tag()) { .enum_field_index => { const field_index = val.castTag(.enum_field_index).?.data; - switch (ty.tag()) { - .enum_simple => return writer.print("{d}", .{field_index}), - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index]; - return dg.renderValue(writer, enum_full.tag_ty, tag_val, location); - } else { - return writer.print("{d}", .{field_index}); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - if (enum_obj.values.count() != 0) { - const tag_val = enum_obj.values.keys()[field_index]; - return dg.renderValue(writer, enum_obj.tag_ty, tag_val, location); - } else { - return writer.print("{d}", .{field_index}); - } - }, - else => unreachable, + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + const tag_val = enum_type.values[field_index]; + return dg.renderValue(writer, enum_type.tag_ty.toType(), tag_val.toValue(), location); + } else { + return writer.print("{d}", .{field_index}); } }, else => { @@ -2539,7 +2524,8 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeByte('('); try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); try w.writeAll(") {\n switch (tag) {\n"); - for (enum_ty.enumFields().keys(), 0..) |name, index| { + for (enum_ty.enumFields(mod), 0..) 
|name_ip, index| { + const name = mod.intern_pool.stringToSlice(name_ip); var tag_pl: Value.Payload.U32 = .{ .base = .{ .tag = .enum_field_index }, .data = @intCast(u32, index), @@ -6930,7 +6916,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { const field: CValue = if (union_ty.unionTagTypeSafety(mod)) |tag_ty| field: { const layout = union_ty.unionGetLayout(mod); if (layout.tag_size != 0) { - const field_index = tag_ty.enumFieldIndex(field_name).?; + const field_index = tag_ty.enumFieldIndex(field_name, mod).?; var tag_pl: Value.Payload.U32 = .{ .base = .{ .tag = .enum_field_index }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c2992534424f..583c08583c31 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1516,30 +1516,25 @@ pub const Object = struct { return enum_di_ty; } - const field_names = ty.enumFields().keys(); + const ip = &mod.intern_pool; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; - const enumerators = try gpa.alloc(*llvm.DIEnumerator, field_names.len); + const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len); defer gpa.free(enumerators); - var buf_field_index: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = undefined, - }; - const field_index_val = Value.initPayload(&buf_field_index.base); - - const int_ty = try ty.intTagType(mod); + const int_ty = enum_type.tag_ty.toType(); const int_info = ty.intInfo(mod); assert(int_info.bits != 0); - for (field_names, 0..) |field_name, i| { - const field_name_z = try gpa.dupeZ(u8, field_name); - defer gpa.free(field_name_z); + for (enum_type.names, 0..) |field_name_ip, i| { + const field_name_z = ip.stringToSlice(field_name_ip); - buf_field_index.data = @intCast(u32, i); - const field_int_val = try field_index_val.enumToInt(ty, mod); - - var bigint_space: Value.BigIntSpace = undefined; - const bigint = field_int_val.toBigInt(&bigint_space, mod); + var bigint_space: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const storage = if (enum_type.values.len != 0) + ip.indexToKey(enum_type.values[i]).int.storage + else + InternPool.Key.Int.Storage{ .u64 = i }; + const bigint = storage.toBigInt(&bigint_space); if (bigint.limbs.len == 1) { enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned); @@ -8852,23 +8847,22 @@ pub const FuncGen = struct { fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { const mod = self.dg.module; - const enum_decl = enum_ty.getOwnerDecl(mod); + const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type; // TODO: detect when the type changes and re-emit this function. 
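+        // The helper is generated once per enum declaration and memoized in
+        // `named_enum_map`, keyed on the owner decl.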
-        const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_decl);
+        const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_type.decl);
         if (gop.found_existing) return gop.value_ptr.*;
-        errdefer assert(self.dg.object.named_enum_map.remove(enum_decl));
+        errdefer assert(self.dg.object.named_enum_map.remove(enum_type.decl));
         var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
         defer arena_allocator.deinit();
         const arena = arena_allocator.allocator();
-        const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod);
+        const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
         defer self.gpa.free(fqn);
         const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn});
-        const int_tag_ty = try enum_ty.intTagType(mod);
-        const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
+        const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())};
         const llvm_ret_ty = try self.dg.lowerType(Type.bool);
         const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
@@ -8891,13 +8885,12 @@ pub const FuncGen = struct {
         self.builder.positionBuilderAtEnd(entry_block);
         self.builder.clearCurrentDebugLocation();
-        const fields = enum_ty.enumFields();
         const named_block = self.context.appendBasicBlock(fn_val, "Named");
         const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed");
         const tag_int_value = fn_val.getParam(0);
-        const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
+        const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len));
-        for (fields.keys(), 0..) |_, field_index| {
+        for (enum_type.names, 0..) |_, field_index| {
             const this_tag_int_value = int: {
                 var tag_val_payload: Value.Payload.U32 = .{
                     .base = .{ .tag = .enum_field_index },
@@ -8930,18 +8923,18 @@ pub const FuncGen = struct {
     fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
         const mod = self.dg.module;
-        const enum_decl = enum_ty.getOwnerDecl(mod);
+        const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type;
         // TODO: detect when the type changes and re-emit this function.
-        const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_decl);
+        const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_type.decl);
         if (gop.found_existing) return gop.value_ptr.*;
-        errdefer assert(self.dg.object.decl_map.remove(enum_decl));
+        errdefer assert(self.dg.object.decl_map.remove(enum_type.decl));
         var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
         defer arena_allocator.deinit();
         const arena = arena_allocator.allocator();
-        const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod);
+        const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
         defer self.gpa.free(fqn);
         const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
@@ -8950,8 +8943,7 @@ pub const FuncGen = struct {
         const usize_llvm_ty = try self.dg.lowerType(Type.usize);
         const slice_alignment = slice_ty.abiAlignment(mod);
-        const int_tag_ty = try enum_ty.intTagType(mod);
-        const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
+        const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())};
         const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
         const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
@@ -8973,16 +8965,16 @@ pub const FuncGen = struct {
         self.builder.positionBuilderAtEnd(entry_block);
         self.builder.clearCurrentDebugLocation();
-        const fields = enum_ty.enumFields();
         const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue");
         const tag_int_value = fn_val.getParam(0);
-        const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, fields.count()));
+        const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, enum_type.names.len));
         const array_ptr_indices = [_]*llvm.Value{
             usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
         };
-        for (fields.keys(), 0..) |name, field_index| {
+        for (enum_type.names, 0..) |name_ip, field_index| {
+            const name = mod.intern_pool.stringToSlice(name_ip);
             const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
             const str_init_llvm_ty = str_init.typeOf();
             const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
@@ -9429,7 +9421,7 @@ pub const FuncGen = struct {
         const tag_int = blk: {
             const tag_ty = union_ty.unionTagTypeHypothetical(mod);
             const union_field_name = union_obj.fields.keys()[extra.field_index];
-            const enum_field_index = tag_ty.enumFieldIndex(union_field_name).?;
+            const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
             var tag_val_payload: Value.Payload.U32 = .{
                 .base = .{ .tag = .enum_field_index },
                 .data = @intCast(u32, enum_field_index),
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index d1e8d9601bd3..e20e127800c6 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -401,14 +401,9 @@ pub const DeclState = struct {
             dbg_info_buffer.appendSliceAssumeCapacity(enum_name);
             dbg_info_buffer.appendAssumeCapacity(0);
-            const fields = ty.enumFields();
-            const values: ?Module.EnumFull.ValueMap = switch (ty.tag()) {
-                .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.values,
-                .enum_simple => null,
-                .enum_numbered => ty.castTag(.enum_numbered).?.data.values,
-                else => unreachable,
-            };
-            for (fields.keys(), 0..) |field_name, field_i| {
+            const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+            for (enum_type.names, 0..)
|field_name_index, field_i| { + const field_name = mod.intern_pool.stringToSlice(field_name_index); // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2 + @sizeOf(u64)); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant)); @@ -416,14 +411,14 @@ pub const DeclState = struct { dbg_info_buffer.appendSliceAssumeCapacity(field_name); dbg_info_buffer.appendAssumeCapacity(0); // DW.AT.const_value, DW.FORM.data8 - const value: u64 = if (values) |vals| value: { - if (vals.count() == 0) break :value @intCast(u64, field_i); // auto-numbered - const value = vals.keys()[field_i]; + const value: u64 = value: { + if (enum_type.values.len == 0) break :value field_i; // auto-numbered + const value = enum_type.values[field_i]; // TODO do not assume a 64bit enum value - could be bigger. // See https://github.com/ziglang/zig/issues/645 - const field_int_val = try value.enumToInt(ty, mod); + const field_int_val = try value.toValue().enumToInt(ty, mod); break :value @bitCast(u64, field_int_val.toSignedInt(mod)); - } else @intCast(u64, field_i); + }; mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); } diff --git a/src/type.zig b/src/type.zig index ab02b29d49be..a2644ebff4a6 100644 --- a/src/type.zig +++ b/src/type.zig @@ -62,12 +62,6 @@ pub const Type = struct { .tuple, .anon_struct, => return .Struct, - - .enum_full, - .enum_nonexhaustive, - .enum_simple, - .enum_numbered, - => return .Enum, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return .Int, @@ -566,22 +560,6 @@ pub const Type = struct { return true; }, - - .enum_full, .enum_nonexhaustive => { - const a_enum_obj = a.cast(Payload.EnumFull).?.data; - const b_enum_obj = (b.cast(Payload.EnumFull) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, - .enum_simple => { - const a_enum_obj = a.cast(Payload.EnumSimple).?.data; - const b_enum_obj = (b.cast(Payload.EnumSimple) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, - .enum_numbered => { - const a_enum_obj = a.cast(Payload.EnumNumbered).?.data; - const b_enum_obj = (b.cast(Payload.EnumNumbered) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, } } @@ -727,22 +705,6 @@ pub const Type = struct { field_val.hash(field_ty, hasher, mod); } }, - - .enum_full, .enum_nonexhaustive => { - const enum_obj: *const Module.EnumFull = ty.cast(Payload.EnumFull).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, - .enum_simple => { - const enum_obj: *const Module.EnumSimple = ty.cast(Payload.EnumSimple).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, - .enum_numbered => { - const enum_obj: *const Module.EnumNumbered = ty.cast(Payload.EnumNumbered).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, } } @@ -920,9 +882,6 @@ pub const Type = struct { .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), - .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), - .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), - .enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull), } } @@ -995,25 +954,6 @@ pub const Type = struct 
{ while (true) { const t = ty.tag(); switch (t) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_full.owner_decl, - }); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_simple.owner_decl, - }); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_numbered.owner_decl, - }); - }, - .function => { const payload = ty.castTag(.function).?.data; try writer.writeAll("fn("); @@ -1199,22 +1139,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - const decl = mod.declPtr(enum_full.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const decl = mod.declPtr(enum_simple.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - const decl = mod.declPtr(enum_numbered.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .error_set_inferred => { const func = ty.castTag(.error_set_inferred).?.data.func; @@ -1500,7 +1424,10 @@ pub const Type = struct { const decl = mod.declPtr(opaque_type.decl); try decl.renderFullyQualifiedName(mod, writer); }, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| { + const decl = mod.declPtr(enum_type.decl); + try decl.renderFullyQualifiedName(mod, writer); + }, // values, not types .un => unreachable, @@ -1593,19 +1520,6 @@ pub const Type = struct { } }, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.fields.count() >= 2; - }, - .enum_numbered, .enum_nonexhaustive => { - const int_tag_ty = try ty.intTagType(mod); - return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - }, - .array => return ty.arrayLen(mod) != 0 and try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), @@ -1766,7 +1680,7 @@ pub const Type = struct { }, .opaque_type => true, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), // values, not types .un => unreachable, @@ -1789,9 +1703,7 @@ pub const Type = struct { .empty_struct_type => false, .none => switch (ty.tag()) { - .pointer, - .enum_numbered, - => true, + .pointer => true, .error_set, .error_set_single, @@ -1799,17 +1711,12 @@ pub const Type = struct { .error_set_merged, // These are function bodies, not function pointers. 
.function, - .enum_simple, .error_union, .anyframe_T, .tuple, .anon_struct, => false, - .enum_full, - .enum_nonexhaustive, - => !ty.cast(Payload.EnumFull).?.data.tag_ty_inferred, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, @@ -1886,7 +1793,10 @@ pub const Type = struct { .tagged => false, }, .opaque_type => false, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .auto => false, + .explicit, .nonexhaustive => true, + }, // values, not types .un => unreachable, @@ -2116,11 +2026,6 @@ pub const Type = struct { return AbiAlignmentAdvanced{ .scalar = big_align }; }, - .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; - }, - .inferred_alloc_const, .inferred_alloc_mut, => unreachable, @@ -2283,7 +2188,7 @@ pub const Type = struct { return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); }, .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) }, // values, not types .un => unreachable, @@ -2475,11 +2380,6 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; - }, - .array => { const payload = ty.castTag(.array).?.data; switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { @@ -2705,7 +2605,7 @@ pub const Type = struct { return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); }, .opaque_type => unreachable, // no size available - .enum_type => @panic("TODO"), + .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) }, // values, not types .un => unreachable, @@ -2823,11 +2723,6 @@ pub const Type = struct { return total; }, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); - }, - .array => { const payload = ty.castTag(.array).?.data; const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); @@ -2964,7 +2859,7 @@ pub const Type = struct { return size; }, .opaque_type => unreachable, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), // values, not types .un => unreachable, @@ -3433,7 +3328,7 @@ pub const Type = struct { pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize { const union_obj = mod.typeToUnion(ty).?; const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod) orelse return null; - const name = union_obj.tag_ty.enumFieldName(index); + const name = union_obj.tag_ty.enumFieldName(index, mod); return union_obj.fields.getIndex(name); } @@ -3690,15 +3585,6 @@ pub const Type = struct { while (true) switch (ty.ip_index) { .none => switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, - .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, - .enum_simple => { - const enum_obj = ty.castTag(.enum_simple).?.data; - const field_count = enum_obj.fields.count(); - if (field_count == 0) return .{ .signedness = 
.unsigned, .bits = 0 }; - return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) }; - }, - .error_set, .error_set_single, .error_set_inferred, .error_set_merged => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; @@ -3728,7 +3614,7 @@ pub const Type = struct { assert(struct_obj.layout == .Packed); ty = struct_obj.backing_int_ty; }, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| ty = enum_type.tag_ty.toType(), .ptr_type => unreachable, .array_type => unreachable, @@ -3964,47 +3850,6 @@ pub const Type = struct { return Value.empty_struct; }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. - if (enum_numbered.tag_ty.hasRuntimeBits(mod)) { - return null; - } - assert(enum_numbered.fields.count() == 1); - return enum_numbered.values.keys()[0]; - }, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - if (enum_full.tag_ty.hasRuntimeBits(mod)) { - return null; - } - switch (enum_full.fields.count()) { - 0 => return Value.@"unreachable", - 1 => if (enum_full.values.count() == 0) { - return Value.enum_field_0; // auto-numbered - } else { - return enum_full.values.keys()[0]; - }, - else => return null, - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.@"unreachable", - 1 => return Value.enum_field_0, - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasRuntimeBits(mod)) { - return Value.enum_field_0; - } else { - return null; - } - }, - .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); @@ -4123,7 +3968,28 @@ pub const Type = struct { return only.toValue(); }, .opaque_type => return null, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty != .comptime_int_type and + !enum_type.tag_ty.toType().hasRuntimeBits(mod)) + { + return Value.enum_field_0; + } else { + return null; + } + }, + .auto, .explicit => switch (enum_type.names.len) { + 0 => return Value.@"unreachable", + 1 => { + if (enum_type.values.len == 0) { + return Value.enum_field_0; // auto-numbered + } else { + return enum_type.values[0].toValue(); + } + }, + else => return null, + }, + }, // values, not types .un => unreachable, @@ -4151,7 +4017,6 @@ pub const Type = struct { .error_set_single, .error_set_inferred, .error_set_merged, - .enum_simple, => false, // These are function bodies, not function pointers. @@ -4191,14 +4056,6 @@ pub const Type = struct { const child_ty = ty.castTag(.anyframe_T).?.data; return child_ty.comptimeOnly(mod); }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return tag_ty.comptimeOnly(mod); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return tag_ty.comptimeOnly(mod); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, @@ -4293,7 +4150,7 @@ pub const Type = struct { .opaque_type => false, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod), // values, not types .un => unreachable, @@ -4346,19 +4203,14 @@ pub const Type = struct { /// Returns null if the type has no namespace. 
pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(), - .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(), - else => .none, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), - .struct_type => |struct_type| struct_type.namespace, - .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), + if (ty.ip_index == .none) return .none; + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + .struct_type => |struct_type| struct_type.namespace, + .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), + .enum_type => |enum_type| enum_type.namespace, - else => .none, - }, + else => .none, }; } @@ -4444,29 +4296,23 @@ pub const Type = struct { /// Asserts the type is an enum or a union. pub fn intTagType(ty: Type, mod: *Module) !Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.tag_ty, - .enum_numbered => ty.castTag(.enum_numbered).?.data.tag_ty, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const field_count = enum_simple.fields.count(); - const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); - return mod.intType(.unsigned, bits); - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod), - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod), + .enum_type => |enum_type| enum_type.tag_ty.toType(), + else => unreachable, }; } - pub fn isNonexhaustiveEnum(ty: Type) bool { - return switch (ty.tag()) { - .enum_nonexhaustive => true, - else => false, + pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool { + return switch (ty.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => true, + .auto, .explicit => false, + }, + else => false, + }, }; } @@ -4510,25 +4356,26 @@ pub const Type = struct { return try Tag.error_set_merged.create(arena, names); } - pub fn enumFields(ty: Type) Module.EnumFull.NameMap { - return switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.fields, - .enum_simple => ty.castTag(.enum_simple).?.data.fields, - .enum_numbered => ty.castTag(.enum_numbered).?.data.fields, - else => unreachable, - }; + pub fn enumFields(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { + return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names; } - pub fn enumFieldCount(ty: Type) usize { - return ty.enumFields().count(); + pub fn enumFieldCount(ty: Type, mod: *Module) usize { + return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names.len; } - pub fn enumFieldName(ty: Type, field_index: usize) []const u8 { - return ty.enumFields().keys()[field_index]; + pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) [:0]const u8 { + const ip = &mod.intern_pool; + const field_name = ip.indexToKey(ty.ip_index).enum_type.names[field_index]; + 
return ip.stringToSlice(field_name); } - pub fn enumFieldIndex(ty: Type, field_name: []const u8) ?usize { - return ty.enumFields().getIndex(field_name); + pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?usize { + const ip = &mod.intern_pool; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(field_name).unwrap() orelse return null; + return enum_type.nameIndex(ip.*, field_name_interned); } /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or @@ -4538,50 +4385,20 @@ pub const Type = struct { if (enum_tag.castTag(.enum_field_index)) |payload| { return @as(usize, payload.data); } - const S = struct { - fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize { - if (int_val.compareAllWithZero(.lt, m)) return null; - const end_val = m.intValue(int_ty, end) catch |err| switch (err) { - // TODO: eliminate this failure condition - error.OutOfMemory => @panic("OOM"), - }; - if (int_val.compareScalar(.gte, end_val, int_ty, m)) return null; - return @intCast(usize, int_val.toUnsignedInt(m)); - } - }; - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - const tag_ty = enum_full.tag_ty; - if (enum_full.values.count() == 0) { - return S.fieldWithRange(tag_ty, enum_tag, enum_full.fields.count(), mod); - } else { - return enum_full.values.getIndexContext(enum_tag, .{ - .ty = tag_ty, - .mod = mod, - }); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - const tag_ty = enum_obj.tag_ty; - if (enum_obj.values.count() == 0) { - return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count(), mod); - } else { - return enum_obj.values.getIndexContext(enum_tag, .{ - .ty = tag_ty, - .mod = mod, - }); - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const fields_len = enum_simple.fields.count(); - const bits = std.math.log2_int_ceil(usize, fields_len); - const tag_ty = mod.intType(.unsigned, bits) catch @panic("TODO: handle OOM here"); - return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod); - }, - else => unreachable, + const ip = &mod.intern_pool; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const tag_ty = enum_type.tag_ty.toType(); + if (enum_type.values.len == 0) { + if (enum_tag.compareAllWithZero(.lt, mod)) return null; + const end_val = mod.intValue(tag_ty, enum_type.names.len) catch |err| switch (err) { + // TODO: eliminate this failure condition + error.OutOfMemory => @panic("OOM"), + }; + if (enum_tag.compareScalar(.gte, end_val, tag_ty, mod)) return null; + return @intCast(usize, enum_tag.toUnsignedInt(mod)); + } else { + assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty); + return enum_type.tagValueIndex(ip.*, enum_tag.ip_index); } } @@ -4905,18 +4722,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return null, .none => switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.srcLoc(mod); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - return enum_numbered.srcLoc(mod); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.srcLoc(mod); - }, .error_set => { const error_set = ty.castTag(.error_set).?.data; return error_set.srcLoc(mod); @@ -4934,6 +4739,7 @@ pub 
const Type = struct { return union_obj.srcLoc(mod); }, .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), + .enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod), else => null, }, } @@ -4946,15 +4752,6 @@ pub const Type = struct { pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index { switch (ty.ip_index) { .none => switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.owner_decl; - }, - .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.owner_decl; - }, .error_set => { const error_set = ty.castTag(.error_set).?.data; return error_set.owner_decl; @@ -4972,6 +4769,7 @@ pub const Type = struct { return union_obj.owner_decl; }, .opaque_type => |opaque_type| opaque_type.decl, + .enum_type => |enum_type| enum_type.decl, else => null, }, } @@ -5012,10 +4810,6 @@ pub const Type = struct { /// The type is the inferred error set of a specific function. error_set_inferred, error_set_merged, - enum_simple, - enum_numbered, - enum_full, - enum_nonexhaustive, pub const last_no_payload_tag = Tag.inferred_alloc_const; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; @@ -5040,9 +4834,6 @@ pub const Type = struct { .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, - .enum_full, .enum_nonexhaustive => Payload.EnumFull, - .enum_simple => Payload.EnumSimple, - .enum_numbered => Payload.EnumNumbered, .tuple => Payload.Tuple, .anon_struct => Payload.AnonStruct, }; @@ -5341,21 +5132,6 @@ pub const Type = struct { values: []Value, }; }; - - pub const EnumFull = struct { - base: Payload, - data: *Module.EnumFull, - }; - - pub const EnumSimple = struct { - base: Payload = .{ .tag = .enum_simple }, - data: *Module.EnumSimple, - }; - - pub const EnumNumbered = struct { - base: Payload = .{ .tag = .enum_numbered }, - data: *Module.EnumNumbered, - }; }; pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; diff --git a/src/value.zig b/src/value.zig index dfeaa4442829..3f7e8050a4bd 100644 --- a/src/value.zig +++ b/src/value.zig @@ -675,80 +675,50 @@ pub const Value = struct { const field_index = switch (val.tag()) { .enum_field_index => val.castTag(.enum_field_index).?.data, .the_only_possible_value => blk: { - assert(ty.enumFieldCount() == 1); + assert(ty.enumFieldCount(mod) == 1); break :blk 0; }, .enum_literal => i: { const name = val.castTag(.enum_literal).?.data; - break :i ty.enumFieldIndex(name).?; + break :i ty.enumFieldIndex(name, mod).?; }, // Assume it is already an integer and return it directly. else => return val, }; - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - return enum_full.values.keys()[field_index]; - } else { - // Field index and integer values are the same. - return mod.intValue(enum_full.tag_ty, field_index); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - if (enum_obj.values.count() != 0) { - return enum_obj.values.keys()[field_index]; - } else { - // Field index and integer values are the same. - return mod.intValue(enum_obj.tag_ty, field_index); - } - }, - .enum_simple => { - // Field index and integer values are the same. 
- const tag_ty = try ty.intTagType(mod); - return mod.intValue(tag_ty, field_index); - }, - else => unreachable, + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + return enum_type.values[field_index].toValue(); + } else { + // Field index and integer values are the same. + return mod.intValue(enum_type.tag_ty.toType(), field_index); } } pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(mod), mod); + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + const field_index = switch (val.tag()) { .enum_field_index => val.castTag(.enum_field_index).?.data, .the_only_possible_value => blk: { - assert(ty.enumFieldCount() == 1); + assert(ty.enumFieldCount(mod) == 1); break :blk 0; }, .enum_literal => return val.castTag(.enum_literal).?.data, else => field_index: { - const values = switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.values, - .enum_numbered => ty.castTag(.enum_numbered).?.data.values, - .enum_simple => Module.EnumFull.ValueMap{}, - else => unreachable, - }; - if (values.entries.len == 0) { + if (enum_type.values.len == 0) { // auto-numbered enum break :field_index @intCast(u32, val.toUnsignedInt(mod)); } - const int_tag_ty = ty.intTagType(mod) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), // TODO handle this failure - }; - break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?); + const field_index = enum_type.tagValueIndex(mod.intern_pool, val.ip_index).?; + break :field_index @intCast(u32, field_index); }, }; - const fields = switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.fields, - .enum_numbered => ty.castTag(.enum_numbered).?.data.fields, - .enum_simple => ty.castTag(.enum_simple).?.data.fields, - else => unreachable, - }; - return fields.keys()[field_index]; + const field_name = enum_type.names[field_index]; + return mod.intern_pool.stringToSlice(field_name); } /// Asserts the value is an integer. From 466328d1ca29f3f6dd142f74dda13b26687e71e0 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 9 May 2023 17:06:10 +0100 Subject: [PATCH 063/205] InternPool: transition float values --- src/InternPool.zig | 195 ++++++- src/Module.zig | 18 + src/Sema.zig | 214 ++----- src/TypedValue.zig | 8 +- src/codegen/c.zig | 17 +- src/codegen/llvm.zig | 17 +- src/type.zig | 10 + src/value.zig | 1288 +++++++++++++++++------------------------- 8 files changed, 792 insertions(+), 975 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 6ff68a758368..2677fba45d1d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -155,6 +155,7 @@ pub const Key = union(enum) { lib_name: u32, }, int: Key.Int, + float: Key.Float, ptr: Ptr, opt: Opt, enum_tag: struct { @@ -361,6 +362,20 @@ pub const Key = union(enum) { }; }; + pub const Float = struct { + ty: Index, + /// The storage used must match the size of the float type being represented. 
+ storage: Storage, + + pub const Storage = union(enum) { + f16: f16, + f32: f32, + f64: f64, + f80: f80, + f128: f128, + }; + }; + pub const Ptr = struct { ty: Index, addr: Addr, @@ -436,6 +451,16 @@ pub const Key = union(enum) { for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb); }, + .float => |float| { + std.hash.autoHash(hasher, float.ty); + switch (float.storage) { + inline else => |val| std.hash.autoHash( + hasher, + @bitCast(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), val), + ), + } + }, + .ptr => |ptr| { std.hash.autoHash(hasher, ptr.ty); // Int-to-ptr pointers are hashed separately than decl-referencing pointers. @@ -561,6 +586,32 @@ pub const Key = union(enum) { }; }, + .float => |a_info| { + const b_info = b.float; + + if (a_info.ty != b_info.ty) + return false; + + if (a_info.ty == .c_longdouble_type and a_info.storage != .f80) { + // These are strange: we'll sometimes represent them as f128, even if the + // underlying type is smaller. f80 is an exception: see float_c_longdouble_f80. + const a_val = switch (a_info.storage) { + inline else => |val| @floatCast(f128, val), + }; + const b_val = switch (b_info.storage) { + inline else => |val| @floatCast(f128, val), + }; + return a_val == b_val; + } + + const StorageTag = @typeInfo(Key.Float.Storage).Union.tag_type.?; + assert(@as(StorageTag, a_info.storage) == @as(StorageTag, b_info.storage)); + + return switch (a_info.storage) { + inline else => |val, tag| val == @field(b_info.storage, @tagName(tag)), + }; + }, + .enum_tag => |a_info| { const b_info = b.enum_tag; _ = a_info; @@ -601,6 +652,7 @@ pub const Key = union(enum) { inline .ptr, .int, + .float, .opt, .extern_func, .enum_tag, @@ -1115,15 +1167,35 @@ pub const Tag = enum(u8) { /// An enum tag identified by a negative integer value. /// data is a limbs index to Int. enum_tag_negative, + /// An f16 value. + /// data is float value bitcasted to u16 and zero-extended. + float_f16, /// An f32 value. /// data is float value bitcasted to u32. float_f32, /// An f64 value. /// data is extra index to Float64. float_f64, + /// An f80 value. + /// data is extra index to Float80. + float_f80, /// An f128 value. /// data is extra index to Float128. float_f128, + /// A c_longdouble value of 80 bits. + /// data is extra index to Float80. + /// This is used when a c_longdouble value is provided as an f80, because f80 has unnormalized + /// values which cannot be losslessly represented as f128. It should only be used when the type + /// underlying c_longdouble for the target is 80 bits. + float_c_longdouble_f80, + /// A c_longdouble value of 128 bits. + /// data is extra index to Float128. + /// This is used when a c_longdouble value is provided as any type other than an f80, since all + /// other float types can be losslessly converted to and from f128. + float_c_longdouble_f128, + /// A comptime_float value. + /// data is extra index to Float128. + float_comptime_float, /// An extern function. extern_func, /// A regular function. @@ -1339,7 +1411,38 @@ pub const Float64 = struct { pub fn get(self: Float64) f64 { const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32); - return @bitCast(u64, int_bits); + return @bitCast(f64, int_bits); + } + + fn pack(val: f64) Float64 { + const bits = @bitCast(u64, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + }; + } +}; + +/// A f80 value, broken up into 2 u32 parts and a u16 part zero-padded to a u32. 
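
Values wider than 32 bits cannot live in an item's u32 `data` field, so Float64/Float80/Float128 bit-pack the float into little-endian 32-bit pieces stored in the extra array. A minimal self-contained sketch of that round trip for f80, using the same two-argument builtins this patch uses (the helper names are illustrative, not InternPool API):

    const std = @import("std");

    // Mirrors Float80.pack/get: bitcast the float to a u80 and split it into
    // two full u32 pieces plus a u16 piece that widens to u32 in the array.
    fn packF80(val: f80) [3]u32 {
        const bits = @bitCast(u80, val);
        return .{
            @truncate(u32, bits),
            @truncate(u32, bits >> 32),
            @truncate(u16, bits >> 64), // top 16 bits, zero-padded to u32
        };
    }

    fn unpackF80(pieces: [3]u32) f80 {
        const bits = @as(u80, pieces[0]) |
            (@as(u80, pieces[1]) << 32) |
            (@as(u80, pieces[2]) << 64);
        return @bitCast(f80, bits);
    }

    test "f80 pieces round-trip losslessly" {
        const x: f80 = 1234.5678;
        try std.testing.expectEqual(x, unpackF80(packF80(x)));
    }
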
+pub const Float80 = struct { + piece0: u32, + piece1: u32, + piece2: u32, // u16 part, top bits + + pub fn get(self: Float80) f80 { + const int_bits = @as(u80, self.piece0) | + (@as(u80, self.piece1) << 32) | + (@as(u80, self.piece2) << 64); + return @bitCast(f80, int_bits); + } + + fn pack(val: f80) Float80 { + const bits = @bitCast(u80, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + .piece2 = @truncate(u16, bits >> 64), + }; } }; @@ -1357,6 +1460,16 @@ pub const Float128 = struct { (@as(u128, self.piece3) << 96); return @bitCast(f128, int_bits); } + + fn pack(val: f128) Float128 { + const bits = @bitCast(u128, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + .piece2 = @truncate(u32, bits >> 64), + .piece3 = @truncate(u32, bits >> 96), + }; + } }; pub fn init(ip: *InternPool, gpa: Allocator) !void { @@ -1576,9 +1689,38 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .int_negative => indexToKeyBigInt(ip, data, false), .enum_tag_positive => @panic("TODO"), .enum_tag_negative => @panic("TODO"), - .float_f32 => @panic("TODO"), - .float_f64 => @panic("TODO"), - .float_f128 => @panic("TODO"), + .float_f16 => .{ .float = .{ + .ty = .f16_type, + .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, + } }, + .float_f32 => .{ .float = .{ + .ty = .f32_type, + .storage = .{ .f32 = @bitCast(f32, data) }, + } }, + .float_f64 => .{ .float = .{ + .ty = .f64_type, + .storage = .{ .f64 = ip.extraData(Float64, data).get() }, + } }, + .float_f80 => .{ .float = .{ + .ty = .f80_type, + .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + } }, + .float_f128 => .{ .float = .{ + .ty = .f128_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, + .float_c_longdouble_f80 => .{ .float = .{ + .ty = .c_longdouble_type, + .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + } }, + .float_c_longdouble_f128 => .{ .float = .{ + .ty = .c_longdouble_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, + .float_comptime_float => .{ .float = .{ + .ty = .comptime_float_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, .extern_func => @panic("TODO"), .func => @panic("TODO"), .only_possible_value => { @@ -1982,6 +2124,46 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, + .float => |float| { + switch (float.ty) { + .f16_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f16, + .data = @bitCast(u16, float.storage.f16), + }), + .f32_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f32, + .data = @bitCast(u32, float.storage.f32), + }), + .f64_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f64, + .data = try ip.addExtra(gpa, Float64.pack(float.storage.f64)), + }), + .f80_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f80, + .data = try ip.addExtra(gpa, Float80.pack(float.storage.f80)), + }), + .f128_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f128, + .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + }), + .c_longdouble_type => switch (float.storage) { + .f80 => |x| ip.items.appendAssumeCapacity(.{ + .tag = .float_c_longdouble_f80, + .data = try ip.addExtra(gpa, Float80.pack(x)), + }), + inline .f16, .f32, .f64, .f128 => |x| ip.items.appendAssumeCapacity(.{ + .tag = .float_c_longdouble_f128, + .data = try ip.addExtra(gpa, Float128.pack(x)), + }), + }, + .comptime_float_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_comptime_float, + .data = 
try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + }), + else => unreachable, + } + }, + .enum_tag => |enum_tag| { const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative; try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs); @@ -2645,9 +2827,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { break :b @sizeOf(Int) + int.limbs_len * 8; }, + .float_f16 => 0, .float_f32 => 0, .float_f64 => @sizeOf(Float64), + .float_f80 => @sizeOf(Float80), .float_f128 => @sizeOf(Float128), + .float_c_longdouble_f80 => @sizeOf(Float80), + .float_c_longdouble_f128 => @sizeOf(Float128), + .float_comptime_float => @sizeOf(Float128), .extern_func => @panic("TODO"), .func => @panic("TODO"), .only_possible_value => 0, diff --git a/src/Module.zig b/src/Module.zig index 6bcd148e67df..b32904e16557 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6940,6 +6940,24 @@ pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocato return i.toValue(); } +/// This function casts the float representation down to the representation of the type, potentially +/// losing data if the representation wasn't correct. +pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) { + 16 => .{ .f16 = @floatCast(f16, x) }, + 32 => .{ .f32 = @floatCast(f32, x) }, + 64 => .{ .f64 = @floatCast(f64, x) }, + 80 => .{ .f80 = @floatCast(f80, x) }, + 128 => .{ .f128 = @floatCast(f128, x) }, + else => unreachable, + }; + const i = try intern(mod, .{ .float = .{ + .ty = ty.ip_index, + .storage = storage, + } }); + return i.toValue(); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } diff --git a/src/Sema.zig b/src/Sema.zig index b94f995b46b5..b5baf4dde392 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3225,7 +3225,7 @@ fn zirOpaqueDecl( const new_namespace = mod.namespacePtr(new_namespace_index); errdefer mod.destroyNamespace(new_namespace_index); - const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ + const opaque_ty = try mod.intern(.{ .opaque_type = .{ .decl = new_decl_index, .namespace = new_namespace_index, } }); @@ -5196,23 +5196,21 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
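
Module.floatValue (added above) is now the single front door for producing float Values: it narrows the input to the type's bit width, then interns the result, so get() picks the item tag from the type/storage pair. A hedged usage sketch, assuming Type.f32 and Type.c_longdouble constants exist alongside the Type.comptime_float used below:

    // Sketch, not verbatim compiler code: which tag each call lands on.
    const half = try mod.floatValue(Type.f32, 0.5); // interned as float_f32
    const cld = try mod.floatValue(Type.c_longdouble, 0.5);
    // Where c_longdouble is 80 bits wide, `cld` is stored as
    // float_c_longdouble_f80; on every other target the storage widens
    // losslessly to f128 and is stored as float_c_longdouble_f128.
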
fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const arena = sema.arena; const number = sema.code.instructions.items(.data)[inst].float; return sema.addConstant( Type.comptime_float, - try Value.Tag.float_64.create(arena, number), + try sema.mod.floatValue(Type.comptime_float, number), ); } fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); return sema.addConstant( Type.comptime_float, - try Value.Tag.float_128.create(arena, number), + try sema.mod.floatValue(Type.comptime_float, number), ); } @@ -9952,7 +9950,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, mod)); + return sema.addConstant(dest_ty, try operand_val.floatCast(dest_ty, mod)); } if (dest_is_comptime_float) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{}); @@ -13302,7 +13300,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13441,7 +13439,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13526,7 +13524,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13616,7 +13614,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13737,7 +13735,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try 
mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13895,7 +13893,10 @@ fn addDivByZeroSafety( if (maybe_rhs_val != null) return; const mod = sema.mod; - const scalar_zero = if (is_int) try mod.intValue(resolved_type.scalarType(mod), 0) else Value.float_zero; // TODO migrate to internpool + const scalar_zero = if (is_int) + try mod.intValue(resolved_type.scalarType(mod), 0) + else + try mod.floatValue(resolved_type.scalarType(mod), 0); const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); @@ -13981,7 +13982,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -14641,7 +14642,7 @@ fn analyzeArithmetic( } else { return sema.addConstant( resolved_type, - try sema.floatAdd(lhs_val, rhs_val, resolved_type), + try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs .{ .src = rhs_src, .air_tag = air_tag }; @@ -14738,7 +14739,7 @@ fn analyzeArithmetic( } else { return sema.addConstant( resolved_type, - try sema.floatSub(lhs_val, rhs_val, resolved_type), + try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs .{ .src = rhs_src, .air_tag = air_tag }; @@ -14808,22 +14809,25 @@ fn analyzeArithmetic( // the result is nan. // If either of the operands are nan, the result is nan. 
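
The zero-operand analysis below is driven by IEEE 754: zero times infinity has no meaningful magnitude and must produce NaN, which is why the folded result is now an interned NaN of the resolved type (floatValue with std.math.nan_f128) instead of the old hardcoded float_32 payload. A standalone check of the underlying arithmetic fact:

    const std = @import("std");

    test "zero times infinity is NaN" {
        const zero: f32 = 0.0;
        const inf = std.math.inf(f32);
        try std.testing.expect(std.math.isNan(zero * inf));
        try std.testing.expect(std.math.isNan(inf * zero));
    }
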
const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { - if (lhs_val.isNan()) { + if (lhs_val.isNan(mod)) { return sema.addConstant(resolved_type, lhs_val); } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: { if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isNan()) { + if (rhs_val.isNan(mod)) { return sema.addConstant(resolved_type, rhs_val); } - if (rhs_val.isInf()) { - return sema.addConstant(resolved_type, try Value.Tag.float_32.create(sema.arena, std.math.nan_f32)); + if (rhs_val.isInf(mod)) { + return sema.addConstant( + resolved_type, + try mod.floatValue(resolved_type, std.math.nan_f128), + ); } } else if (resolved_type.isAnyFloat()) { break :lz; @@ -14847,13 +14851,16 @@ fn analyzeArithmetic( return sema.addConstUndef(resolved_type); } } - if (rhs_val.isNan()) { + if (rhs_val.isNan(mod)) { return sema.addConstant(resolved_type, rhs_val); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isInf()) { - return sema.addConstant(resolved_type, try Value.Tag.float_32.create(sema.arena, std.math.nan_f32)); + if (lhs_val.isInf(mod)) { + return sema.addConstant( + resolved_type, + try mod.floatValue(resolved_type, std.math.nan_f128), + ); } } else if (resolved_type.isAnyFloat()) { break :rz; @@ -14896,7 +14903,7 @@ fn analyzeArithmetic( // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -14944,7 +14951,7 @@ fn analyzeArithmetic( // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -19167,7 +19174,7 @@ fn zirReify( const new_namespace = mod.namespacePtr(new_namespace_index); errdefer mod.destroyNamespace(new_namespace_index); - const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ + const opaque_ty = try mod.intern(.{ .opaque_type = .{ .decl = new_decl_index, .namespace = new_namespace_index, } }); @@ -25678,7 +25685,7 @@ fn coerceExtra( // Keep the comptime Value representation; take the new type. 
return sema.addConstant(dest_ty, val); } else { - const new_val = try mod.intern_pool.getCoerced(mod.gpa, val.ip_index, dest_ty.ip_index); + const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index); return sema.addConstant(dest_ty, new_val.toValue()); } } @@ -26032,7 +26039,7 @@ fn coerceExtra( break :float; }; - if (val.floatHasFraction()) { + if (val.floatHasFraction(mod)) { return sema.fail( block, inst_src, @@ -26081,7 +26088,7 @@ fn coerceExtra( .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) { .ComptimeFloat => { const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const result_val = try val.floatCast(sema.arena, dest_ty, mod); + const result_val = try val.floatCast(dest_ty, mod); return try sema.addConstant(dest_ty, result_val); }, .Float => { @@ -26089,7 +26096,7 @@ fn coerceExtra( return sema.addConstUndef(dest_ty); } if (try sema.resolveMaybeUndefVal(inst)) |val| { - const result_val = try val.floatCast(sema.arena, dest_ty, mod); + const result_val = try val.floatCast(dest_ty, mod); if (!val.eql(result_val, inst_ty, sema.mod)) { return sema.fail( block, @@ -30071,7 +30078,7 @@ fn cmpNumeric( if (lhs_val.isUndef() or rhs_val.isUndef()) { return sema.addConstUndef(Type.bool); } - if (lhs_val.isNan() or rhs_val.isNan()) { + if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { if (op == std.math.CompareOperator.neq) { return Air.Inst.Ref.bool_true; } else { @@ -30166,15 +30173,15 @@ fn cmpNumeric( try sema.resolveLazyValue(lhs_val); if (lhs_val.isUndef()) return sema.addConstUndef(Type.bool); - if (lhs_val.isNan()) switch (op) { + if (lhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, else => return Air.Inst.Ref.bool_false, }; - if (lhs_val.isInf()) switch (op) { + if (lhs_val.isInf(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, .eq => return Air.Inst.Ref.bool_false, - .gt, .gte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, - .lt, .lte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, + .gt, .gte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, + .lt, .lte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, }; if (!rhs_is_signed) { switch (lhs_val.orderAgainstZero(mod)) { @@ -30191,7 +30198,7 @@ fn cmpNumeric( } } if (lhs_is_float) { - if (lhs_val.floatHasFraction()) { + if (lhs_val.floatHasFraction(mod)) { switch (op) { .eq => return Air.Inst.Ref.bool_false, .neq => return Air.Inst.Ref.bool_true, @@ -30201,7 +30208,7 @@ fn cmpNumeric( var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod)); defer bigint.deinit(); - if (lhs_val.floatHasFraction()) { + if (lhs_val.floatHasFraction(mod)) { if (lhs_is_signed) { try bigint.addScalar(&bigint, -1); } else { @@ -30225,15 +30232,15 @@ fn cmpNumeric( try sema.resolveLazyValue(rhs_val); if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); - if (rhs_val.isNan()) switch (op) { + if (rhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, else => return Air.Inst.Ref.bool_false, }; - if (rhs_val.isInf()) switch (op) { + if (rhs_val.isInf(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, .eq => return Air.Inst.Ref.bool_false, - .gt, .gte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, - .lt, .lte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, + 
.gt, .gte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, + .lt, .lte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, }; if (!lhs_is_signed) { switch (rhs_val.orderAgainstZero(mod)) { @@ -30250,7 +30257,7 @@ fn cmpNumeric( } } if (rhs_is_float) { - if (rhs_val.floatHasFraction()) { + if (rhs_val.floatHasFraction(mod)) { switch (op) { .eq => return Air.Inst.Ref.bool_false, .neq => return Air.Inst.Ref.bool_true, @@ -30260,7 +30267,7 @@ fn cmpNumeric( var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod)); defer bigint.deinit(); - if (rhs_val.floatHasFraction()) { + if (rhs_val.floatHasFraction(mod)) { if (rhs_is_signed) { try bigint.addScalar(&bigint, -1); } else { @@ -31713,6 +31720,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -33216,6 +33224,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -33777,6 +33786,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -33935,7 +33945,7 @@ fn numberAddWrapScalar( } if (ty.isAnyFloat()) { - return sema.floatAdd(lhs, rhs, ty); + return Value.floatAdd(lhs, rhs, ty, sema.arena, mod); } const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty); @@ -33989,127 +33999,13 @@ fn numberSubWrapScalar( } if (ty.isAnyFloat()) { - return sema.floatSub(lhs, rhs, ty); + return Value.floatSub(lhs, rhs, ty, sema.arena, mod); } const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty); return overflow_result.wrapped_result; } -fn floatAdd( - sema: *Sema, - lhs: Value, - rhs: Value, - float_type: Type, -) !Value { - const mod = sema.mod; - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod)); - for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(sema.mod, i); - const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); - } - return Value.Tag.aggregate.create(sema.arena, result_data); - } - return sema.floatAddScalar(lhs, rhs, float_type); -} - -fn floatAddScalar( - sema: *Sema, - lhs: Value, - rhs: Value, - float_type: Type, -) !Value { - const mod = sema.mod; - const target = sema.mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16, mod); - const rhs_val = rhs.toFloat(f16, mod); - return Value.Tag.float_16.create(sema.arena, lhs_val + rhs_val); - }, - 32 => { - const lhs_val = lhs.toFloat(f32, mod); - const rhs_val = rhs.toFloat(f32, mod); - return Value.Tag.float_32.create(sema.arena, lhs_val + rhs_val); - }, - 64 => { - const lhs_val = lhs.toFloat(f64, mod); - const rhs_val = rhs.toFloat(f64, mod); - return Value.Tag.float_64.create(sema.arena, lhs_val + rhs_val); - }, - 80 => { - const lhs_val = lhs.toFloat(f80, mod); - const rhs_val = rhs.toFloat(f80, mod); - return Value.Tag.float_80.create(sema.arena, lhs_val + rhs_val); - }, - 128 => { - const lhs_val = lhs.toFloat(f128, mod); - const rhs_val = rhs.toFloat(f128, mod); - return Value.Tag.float_128.create(sema.arena, lhs_val + rhs_val); - }, - else => unreachable, - } -} - -fn floatSub( - sema: *Sema, - lhs: Value, - rhs: Value, - float_type: Type, -) !Value { - const mod = sema.mod; - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod)); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(sema.mod, i); - const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); - } - return Value.Tag.aggregate.create(sema.arena, result_data); - } - return sema.floatSubScalar(lhs, rhs, float_type); -} - -fn floatSubScalar( - sema: *Sema, - lhs: Value, - rhs: Value, - float_type: Type, -) !Value { - const mod = sema.mod; - const target = sema.mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16, mod); - const rhs_val = rhs.toFloat(f16, mod); - return Value.Tag.float_16.create(sema.arena, lhs_val - rhs_val); - }, - 32 => { - const lhs_val = lhs.toFloat(f32, mod); - const rhs_val = rhs.toFloat(f32, mod); - return Value.Tag.float_32.create(sema.arena, lhs_val - rhs_val); - }, - 64 => { - const lhs_val = lhs.toFloat(f64, mod); - const rhs_val = rhs.toFloat(f64, mod); - return Value.Tag.float_64.create(sema.arena, lhs_val - rhs_val); - }, - 80 => { - const lhs_val = lhs.toFloat(f80, mod); - const rhs_val = rhs.toFloat(f80, mod); - return Value.Tag.float_80.create(sema.arena, lhs_val - rhs_val); - }, - 128 => { - const lhs_val = lhs.toFloat(f128, mod); - const rhs_val = rhs.toFloat(f128, mod); - return Value.Tag.float_128.create(sema.arena, lhs_val - rhs_val); - }, - else => unreachable, - } -} - fn intSubWithOverflow( sema: *Sema, lhs: Value, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 5f295e42f30e..57ef662a9e21 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -283,11 +283,6 @@ pub fn print( } return writer.writeAll(" }"); }, - .float_16 => return writer.print("{d}", .{val.castTag(.float_16).?.data}), - .float_32 => return writer.print("{d}", .{val.castTag(.float_32).?.data}), - .float_64 => return writer.print("{d}", .{val.castTag(.float_64).?.data}), - .float_80 => return 
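
The Sema-local floatAdd/floatSub helpers removed above live on as Value.floatAdd/Value.floatSub, which take the Module so results can be interned rather than arena-allocated. Their scalar bodies are not shown in this hunk; presumably they follow the same shape as floatRemScalar/floatModScalar later in this patch, roughly:

    // Presumed shape of Value.floatAddScalar after this change (a sketch,
    // not the hunk's verbatim contents): compute at the target type's
    // width, then intern the result.
    pub fn floatAddScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value {
        const target = mod.getTarget();
        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
            16 => .{ .f16 = lhs.toFloat(f16, mod) + rhs.toFloat(f16, mod) },
            32 => .{ .f32 = lhs.toFloat(f32, mod) + rhs.toFloat(f32, mod) },
            64 => .{ .f64 = lhs.toFloat(f64, mod) + rhs.toFloat(f64, mod) },
            80 => .{ .f80 = lhs.toFloat(f80, mod) + rhs.toFloat(f80, mod) },
            128 => .{ .f128 = lhs.toFloat(f128, mod) + rhs.toFloat(f128, mod) },
            else => unreachable,
        };
        return (try mod.intern(.{ .float = .{
            .ty = float_type.ip_index,
            .storage = storage,
        } })).toValue();
    }
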
writer.print("{d}", .{@floatCast(f64, val.castTag(.float_80).?.data)}), - .float_128 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_128).?.data)}), .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), .eu_payload => { val = val.castTag(.eu_payload).?.data; @@ -363,6 +358,9 @@ pub fn print( .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), }, + .float => |float| switch (float.storage) { + inline else => |x| return writer.print("{}", .{x}), + }, else => return writer.print("{}", .{val.ip_index}), } }, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a67d39471ad2..10a4856ad03c 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6612,7 +6612,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const reduce = f.air.instructions.items(.data)[inst].reduce; - const target = mod.getTarget(); const scalar_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(reduce.operand); try reap(f, inst, &.{reduce.operand}); @@ -6679,16 +6678,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa); defer arena.deinit(); - const ExpectedContents = union { - f16: Value.Payload.Float_16, - f32: Value.Payload.Float_32, - f64: Value.Payload.Float_64, - f80: Value.Payload.Float_80, - f128: Value.Payload.Float_128, - }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), .And => switch (scalar_ty.zigTypeTag(mod)) { @@ -6701,13 +6690,13 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .Min => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one_comptime_int, .Int => try scalar_ty.maxIntScalar(mod, scalar_ty), - .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), + .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(mod)) { .Bool => try mod.intValue(scalar_ty, 0), - .Int => try scalar_ty.minInt(stack.get(), mod), - .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), + .Int => try scalar_ty.minInt(arena.allocator(), mod), + .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, .Mul => try mod.intValue(Type.comptime_int, 1), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 583c08583c31..cc766c956227 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -9238,22 +9238,19 @@ pub const FuncGen = struct { }) catch unreachable, else => unreachable, }; - var init_value_payload = Value.Payload.Float_32{ - .data = switch (reduce.operation) { - .Min => std.math.nan(f32), - .Max => std.math.nan(f32), - .Add => -0.0, - .Mul => 1.0, - else => unreachable, - }, - }; const param_llvm_ty = try self.dg.lowerType(scalar_ty); const param_types = [2]*llvm.Type{ param_llvm_ty, param_llvm_ty }; const libc_fn = self.getLibcFunction(fn_name, ¶m_types, param_llvm_ty); const init_value = try self.dg.lowerValue(.{ .ty = scalar_ty, - .val = Value.initPayload(&init_value_payload.base), + .val = try mod.floatValue(scalar_ty, switch (reduce.operation) { + .Min => std.math.nan(f32), + .Max => std.math.nan(f32), + .Add => -0.0, + .Mul => 1.0, + else => unreachable, + }), }); return self.buildReducedCall(libc_fn, operand, 
operand_ty.vectorLen(mod), init_value); } diff --git a/src/type.zig b/src/type.zig index a2644ebff4a6..2e4e9ca6fec7 100644 --- a/src/type.zig +++ b/src/type.zig @@ -133,6 +133,7 @@ pub const Type = struct { .un => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -1434,6 +1435,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -1687,6 +1689,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -1803,6 +1806,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -2195,6 +2199,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -2612,6 +2617,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -2866,6 +2872,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -3632,6 +3639,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -3996,6 +4004,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, @@ -4157,6 +4166,7 @@ pub const Type = struct { .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, + .float => unreachable, .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, diff --git a/src/value.zig b/src/value.zig index 3f7e8050a4bd..bb3716d28ecb 100644 --- a/src/value.zig +++ b/src/value.zig @@ -72,11 +72,6 @@ pub const Value = struct { empty_array_sentinel, /// Pointer and length as sub `Value` objects. slice, - float_16, - float_32, - float_64, - float_80, - float_128, enum_literal, /// A specific enum tag, indicated by the field index (declaration order). 
enum_field_index, @@ -160,11 +155,6 @@ pub const Value = struct { .decl_ref_mut => Payload.DeclRefMut, .elem_ptr => Payload.ElemPtr, .field_ptr => Payload.FieldPtr, - .float_16 => Payload.Float_16, - .float_32 => Payload.Float_32, - .float_64 => Payload.Float_64, - .float_80 => Payload.Float_80, - .float_128 => Payload.Float_128, .@"error" => Payload.Error, .inferred_alloc => Payload.InferredAlloc, .inferred_alloc_comptime => Payload.InferredAllocComptime, @@ -395,11 +385,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .float_16 => return self.copyPayloadShallow(arena, Payload.Float_16), - .float_32 => return self.copyPayloadShallow(arena, Payload.Float_32), - .float_64 => return self.copyPayloadShallow(arena, Payload.Float_64), - .float_80 => return self.copyPayloadShallow(arena, Payload.Float_80), - .float_128 => return self.copyPayloadShallow(arena, Payload.Float_128), .enum_literal => { const payload = self.castTag(.enum_literal).?; const new_payload = try arena.create(Payload.Bytes); @@ -544,11 +529,6 @@ pub const Value = struct { }, .empty_array_sentinel => return out_stream.writeAll("(empty array with sentinel)"), .slice => return out_stream.writeAll("(slice)"), - .float_16 => return out_stream.print("{}", .{val.castTag(.float_16).?.data}), - .float_32 => return out_stream.print("{}", .{val.castTag(.float_32).?.data}), - .float_64 => return out_stream.print("{}", .{val.castTag(.float_64).?.data}), - .float_80 => return out_stream.print("{}", .{val.castTag(.float_80).?.data}), - .float_128 => return out_stream.print("{}", .{val.castTag(.float_128).?.data}), .@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}), .eu_payload => { try out_stream.writeAll("(eu_payload) "); @@ -1181,14 +1161,17 @@ pub const Value = struct { return mod.intValue_big(ty, bigint.toConst()); } }, - .Float => switch (ty.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian))), - 32 => return Value.Tag.float_32.create(arena, @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian))), - 64 => return Value.Tag.float_64.create(arena, @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian))), - 80 => return Value.Tag.float_80.create(arena, @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian))), - 128 => return Value.Tag.float_128.create(arena, @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian))), - else => unreachable, - }, + .Float => return (try mod.intern(.{ .float = .{ + .ty = ty.ip_index, + .storage = switch (ty.floatBits(target)) { + 16 => .{ .f16 = @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian)) }, + 32 => .{ .f32 = @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian)) }, + 64 => .{ .f64 = @bitCast(f64, std.mem.readInt(u64, buffer[0..8], endian)) }, + 80 => .{ .f80 = @bitCast(f80, std.mem.readInt(u80, buffer[0..10], endian)) }, + 128 => .{ .f128 = @bitCast(f128, std.mem.readInt(u128, buffer[0..16], endian)) }, + else => unreachable, + }, + } })).toValue(), .Array => { const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); @@ -1294,14 +1277,17 @@ pub const Value = struct { return mod.intValue_big(ty, bigint.toConst()); } }, - .Float => switch (ty.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian))), - 32 => return Value.Tag.float_32.create(arena, @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian))), - 64 => 
return Value.Tag.float_64.create(arena, @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian))), - 80 => return Value.Tag.float_80.create(arena, @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian))), - 128 => return Value.Tag.float_128.create(arena, @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian))), - else => unreachable, - }, + .Float => return (try mod.intern(.{ .float = .{ + .ty = ty.ip_index, + .storage = switch (ty.floatBits(target)) { + 16 => .{ .f16 = @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian)) }, + 32 => .{ .f32 = @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian)) }, + 64 => .{ .f64 = @bitCast(f64, std.mem.readPackedInt(u64, buffer, bit_offset, endian)) }, + 80 => .{ .f80 = @bitCast(f80, std.mem.readPackedInt(u80, buffer, bit_offset, endian)) }, + 128 => .{ .f128 = @bitCast(f128, std.mem.readPackedInt(u128, buffer, bit_offset, endian)) }, + else => unreachable, + }, + } })).toValue(), .Vector => { const elem_ty = ty.childType(mod); const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); @@ -1346,28 +1332,20 @@ pub const Value = struct { /// Asserts that the value is a float or an integer. pub fn toFloat(val: Value, comptime T: type, mod: *const Module) T { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .float_16 => @floatCast(T, val.castTag(.float_16).?.data), - .float_32 => @floatCast(T, val.castTag(.float_32).?.data), - .float_64 => @floatCast(T, val.castTag(.float_64).?.data), - .float_80 => @floatCast(T, val.castTag(.float_80).?.data), - .float_128 => @floatCast(T, val.castTag(.float_128).?.data), - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), - inline .u64, .i64 => |x| { - if (T == f80) { - @panic("TODO we can't lower this properly on non-x86 llvm backend yet"); - } - return @intToFloat(T, x); - }, + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), + inline .u64, .i64 => |x| { + if (T == f80) { + @panic("TODO we can't lower this properly on non-x86 llvm backend yet"); + } + return @intToFloat(T, x); }, - else => unreachable, }, + .float => |float| switch (float.storage) { + inline else => |x| @floatCast(T, x), + }, + else => unreachable, }; } @@ -1552,28 +1530,27 @@ pub const Value = struct { /// Converts an integer or a float to a float. May result in a loss of information. /// Caller can find out by equality checking the result against the operand. 
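
The floatCast contract described in the doc comment just below is what the coercion logic earlier in this patch relies on: the cast may round, and the caller detects that by comparing the result against the operand (coerceExtra does exactly `!val.eql(result_val, inst_ty, ...)`). The rounding itself is ordinary float narrowing:

    const std = @import("std");

    test "narrowing a float can lose information" {
        const x: f64 = 0.1; // already rounded once, to f64 precision
        const y = @floatCast(f32, x); // rounds again, dropping mantissa bits
        try std.testing.expect(@floatCast(f64, y) != x); // not an identity
    }
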
- pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, mod: *const Module) !Value { + pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value { const target = mod.getTarget(); - switch (dest_ty.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, self.toFloat(f16, mod)), - 32 => return Value.Tag.float_32.create(arena, self.toFloat(f32, mod)), - 64 => return Value.Tag.float_64.create(arena, self.toFloat(f64, mod)), - 80 => return Value.Tag.float_80.create(arena, self.toFloat(f80, mod)), - 128 => return Value.Tag.float_128.create(arena, self.toFloat(f128, mod)), - else => unreachable, - } + return (try mod.intern(.{ .float = .{ + .ty = dest_ty.ip_index, + .storage = switch (dest_ty.floatBits(target)) { + 16 => .{ .f16 = self.toFloat(f16, mod) }, + 32 => .{ .f32 = self.toFloat(f32, mod) }, + 64 => .{ .f64 = self.toFloat(f64, mod) }, + 80 => .{ .f80 = self.toFloat(f80, mod) }, + 128 => .{ .f128 = self.toFloat(f128, mod) }, + else => unreachable, + }, + } })).toValue(); } /// Asserts the value is a float - pub fn floatHasFraction(self: Value) bool { - return switch (self.tag()) { - .float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0, - .float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0, - .float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0, - //.float_80 => @rem(self.castTag(.float_80).?.data, 1) != 0, - .float_80 => @panic("TODO implement __remx in compiler-rt"), - .float_128 => @rem(self.castTag(.float_128).?.data, 1) != 0, - + pub fn floatHasFraction(self: Value, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(self.ip_index)) { + .float => |float| switch (float.storage) { + inline else => |x| @rem(x, 1) != 0, + }, else => unreachable, }; } @@ -1634,12 +1611,6 @@ pub const Value = struct { } }, - .float_16 => std.math.order(lhs.castTag(.float_16).?.data, 0), - .float_32 => std.math.order(lhs.castTag(.float_32).?.data, 0), - .float_64 => std.math.order(lhs.castTag(.float_64).?.data, 0), - .float_80 => std.math.order(lhs.castTag(.float_80).?.data, 0), - .float_128 => std.math.order(lhs.castTag(.float_128).?.data, 0), - .elem_ptr => { const elem_ptr = lhs.castTag(.elem_ptr).?.data; switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) { @@ -1662,6 +1633,9 @@ pub const Value = struct { .big_int => |big_int| big_int.orderAgainstScalar(0), inline .u64, .i64 => |x| std.math.order(x, 0), }, + .float => |float| switch (float.storage) { + inline else => |x| std.math.order(x, 0), + }, else => unreachable, }, } @@ -1688,20 +1662,21 @@ pub const Value = struct { .gt => {}, } - const lhs_float = lhs.isFloat(); - const rhs_float = rhs.isFloat(); + const lhs_float = lhs.isFloat(mod); + const rhs_float = rhs.isFloat(mod); if (lhs_float and rhs_float) { const lhs_tag = lhs.tag(); const rhs_tag = rhs.tag(); if (lhs_tag == rhs_tag) { - return switch (lhs.tag()) { - .float_16 => return std.math.order(lhs.castTag(.float_16).?.data, rhs.castTag(.float_16).?.data), - .float_32 => return std.math.order(lhs.castTag(.float_32).?.data, rhs.castTag(.float_32).?.data), - .float_64 => return std.math.order(lhs.castTag(.float_64).?.data, rhs.castTag(.float_64).?.data), - .float_80 => return std.math.order(lhs.castTag(.float_80).?.data, rhs.castTag(.float_80).?.data), - .float_128 => return std.math.order(lhs.castTag(.float_128).?.data, rhs.castTag(.float_128).?.data), - else => unreachable, + const lhs_storage = mod.intern_pool.indexToKey(lhs.ip_index).float.storage; + const rhs_storage = 
mod.intern_pool.indexToKey(rhs.ip_index).float.storage; + const lhs128: f128 = switch (lhs_storage) { + inline else => |x| x, }; + const rhs128: f128 = switch (rhs_storage) { + inline else => |x| x, + }; + return std.math.order(lhs128, rhs128); } } if (lhs_float or rhs_float) { @@ -1808,12 +1783,12 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!bool { - if (lhs.isInf()) { + if (lhs.isInf(mod)) { switch (op) { .neq => return true, .eq => return false, - .gt, .gte => return !lhs.isNegativeInf(), - .lt, .lte => return lhs.isNegativeInf(), + .gt, .gte => return !lhs.isNegativeInf(mod), + .lt, .lte => return lhs.isNegativeInf(mod), } } @@ -1841,14 +1816,14 @@ pub const Value = struct { } return true; }, - .float_16 => if (std.math.isNan(lhs.castTag(.float_16).?.data)) return op == .neq, - .float_32 => if (std.math.isNan(lhs.castTag(.float_32).?.data)) return op == .neq, - .float_64 => if (std.math.isNan(lhs.castTag(.float_64).?.data)) return op == .neq, - .float_80 => if (std.math.isNan(lhs.castTag(.float_80).?.data)) return op == .neq, - .float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op == .neq, else => {}, }, - else => {}, + else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + .float => |float| switch (float.storage) { + inline else => |x| if (std.math.isNan(x)) return op == .neq, + }, + else => {}, + }, } return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); } @@ -2919,22 +2894,18 @@ pub const Value = struct { } /// Valid for all types. Asserts the value is not undefined. - pub fn isFloat(self: Value) bool { + pub fn isFloat(self: Value, mod: *const Module) bool { return switch (self.ip_index) { .undef => unreachable, .none => switch (self.tag()) { .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, - - .float_16, - .float_32, - .float_64, - .float_80, - .float_128, - => true, else => false, }, - else => false, + else => switch (mod.intern_pool.indexToKey(self.ip_index)) { + .float => true, + else => false, + }, }; } @@ -2951,33 +2922,32 @@ pub const Value = struct { const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) 
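
Note how orderAdvanced above compares mixed-width floats: both storages are widened to f128 and ordered there. That widening is lossless for every finite value the pool can hold, since the significands and exponent ranges of f16/f32/f64/f80 all fit inside f128's 113-bit significand and 15-bit exponent. A small demonstration that distinct roundings stay distinct after widening:

    const std = @import("std");

    test "compare across widths by widening to f128" {
        const a: f16 = 0.1; // 0.1 rounded to f16 precision
        const b: f32 = 0.1; // 0.1 rounded to f32 precision
        const wide_a = @floatCast(f128, a);
        const wide_b = @floatCast(f128, b);
        // The two roundings are different values, and order() observes that.
        try std.testing.expect(std.math.order(wide_a, wide_b) != .eq);
    }
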
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try intToFloatScalar(elem_val, arena, scalar_ty, mod, opt_sema); + scalar.* = try intToFloatScalar(elem_val, scalar_ty, mod, opt_sema); } return Value.Tag.aggregate.create(arena, result_data); } - return intToFloatScalar(val, arena, float_ty, mod, opt_sema); + return intToFloatScalar(val, float_ty, mod, opt_sema); } - pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - const target = mod.getTarget(); + pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { switch (val.ip_index) { .undef => return val, .none => switch (val.tag()) { - .the_only_possible_value => return Value.float_zero, // for i0, u0 + .the_only_possible_value => return mod.floatValue(float_ty, 0), // for i0, u0 .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); + return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); } else { - return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target); + return intToFloatInner(ty.abiAlignment(mod), float_ty, mod); } }, .lazy_size => { const ty = val.castTag(.lazy_size).?.data; if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); + return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); } else { - return intToFloatInner(ty.abiSize(mod), arena, float_ty, target); + return intToFloatInner(ty.abiSize(mod), float_ty, mod); } }, else => unreachable, @@ -2986,35 +2956,29 @@ pub const Value = struct { .int => |int| switch (int.storage) { .big_int => |big_int| { const float = bigIntToFloat(big_int.limbs, big_int.positive); - return floatToValue(float, arena, float_ty, target); + return mod.floatValue(float_ty, float); }, - inline .u64, .i64 => |x| intToFloatInner(x, arena, float_ty, target), + inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod), }, else => unreachable, }, } } - fn intToFloatInner(x: anytype, arena: Allocator, dest_ty: Type, target: Target) !Value { - switch (dest_ty.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)), - 32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)), - 64 => return Value.Tag.float_64.create(arena, @intToFloat(f64, x)), - 80 => return Value.Tag.float_80.create(arena, @intToFloat(f80, x)), - 128 => return Value.Tag.float_128.create(arena, @intToFloat(f128, x)), - else => unreachable, - } - } - - pub fn floatToValue(float: f128, arena: Allocator, dest_ty: Type, target: Target) !Value { - switch (dest_ty.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)), - 32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)), - 64 => return Value.Tag.float_64.create(arena, @floatCast(f64, float)), - 80 => return Value.Tag.float_80.create(arena, @floatCast(f80, float)), - 128 => return Value.Tag.float_128.create(arena, float), + fn intToFloatInner(x: anytype, dest_ty: Type, mod: *Module) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) { + 16 => .{ .f16 = @intToFloat(f16, x) }, + 32 => .{ .f32 = @intToFloat(f32, x) }, + 64 => .{ .f64 = @intToFloat(f64, x) }, + 80 => .{ .f80 = @intToFloat(f80, x) }, + 128 
=> .{ .f128 = @intToFloat(f128, x) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = dest_ty.ip_index, + .storage = storage, + } })).toValue(); } fn calcLimbLenFloat(scalar: anytype) usize { @@ -3286,8 +3250,8 @@ pub const Value = struct { /// Supports both floats and ints; handles undefined. pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { if (lhs.isUndef() or rhs.isUndef()) return undef; - if (lhs.isNan()) return rhs; - if (rhs.isNan()) return lhs; + if (lhs.isNan(mod)) return rhs; + if (rhs.isNan(mod)) return lhs; return switch (order(lhs, rhs, mod)) { .lt => rhs, @@ -3298,8 +3262,8 @@ pub const Value = struct { /// Supports both floats and ints; handles undefined. pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { if (lhs.isUndef() or rhs.isUndef()) return undef; - if (lhs.isNan()) return rhs; - if (rhs.isNan()) return lhs; + if (lhs.isNan(mod)) return rhs; + if (rhs.isNan(mod)) return lhs; return switch (order(lhs, rhs, mod)) { .lt => lhs, @@ -3587,44 +3551,32 @@ pub const Value = struct { } /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. - pub fn isNan(val: Value) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .float_16 => std.math.isNan(val.castTag(.float_16).?.data), - .float_32 => std.math.isNan(val.castTag(.float_32).?.data), - .float_64 => std.math.isNan(val.castTag(.float_64).?.data), - .float_80 => std.math.isNan(val.castTag(.float_80).?.data), - .float_128 => std.math.isNan(val.castTag(.float_128).?.data), - else => false, + pub fn isNan(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isNan(x), }, else => false, }; } /// Returns true if the value is a floating point type and is infinite. Returns false otherwise. - pub fn isInf(val: Value) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .float_16 => std.math.isInf(val.castTag(.float_16).?.data), - .float_32 => std.math.isInf(val.castTag(.float_32).?.data), - .float_64 => std.math.isInf(val.castTag(.float_64).?.data), - .float_80 => std.math.isInf(val.castTag(.float_80).?.data), - .float_128 => std.math.isInf(val.castTag(.float_128).?.data), - else => false, + pub fn isInf(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isInf(x), }, else => false, }; } - pub fn isNegativeInf(val: Value) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .float_16 => std.math.isNegativeInf(val.castTag(.float_16).?.data), - .float_32 => std.math.isNegativeInf(val.castTag(.float_32).?.data), - .float_64 => std.math.isNegativeInf(val.castTag(.float_64).?.data), - .float_80 => std.math.isNegativeInf(val.castTag(.float_80).?.data), - .float_128 => std.math.isNegativeInf(val.castTag(.float_128).?.data), - else => false, + pub fn isNegativeInf(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isNegativeInf(x), }, else => false, }; @@ -3636,43 +3588,27 @@ pub const Value = struct { for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); + scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatRemScalar(lhs, rhs, float_type, arena, mod); + return floatRemScalar(lhs, rhs, float_type, mod); } - pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *const Module) !Value { + pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16, mod); - const rhs_val = rhs.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @rem(lhs_val, rhs_val)); - }, - 32 => { - const lhs_val = lhs.toFloat(f32, mod); - const rhs_val = rhs.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @rem(lhs_val, rhs_val)); - }, - 64 => { - const lhs_val = lhs.toFloat(f64, mod); - const rhs_val = rhs.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @rem(lhs_val, rhs_val)); - }, - 80 => { - const lhs_val = lhs.toFloat(f80, mod); - const rhs_val = rhs.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val)); - }, - 128 => { - const lhs_val = lhs.toFloat(f128, mod); - const rhs_val = rhs.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @rem(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @rem(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @rem(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @rem(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @rem(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -3681,43 +3617,27 @@ pub const Value = struct { for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); + scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatModScalar(lhs, rhs, float_type, arena, mod); + return floatModScalar(lhs, rhs, float_type, mod); } - pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *const Module) !Value { + pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16, mod); - const rhs_val = rhs.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @mod(lhs_val, rhs_val)); - }, - 32 => { - const lhs_val = lhs.toFloat(f32, mod); - const rhs_val = rhs.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @mod(lhs_val, rhs_val)); - }, - 64 => { - const lhs_val = lhs.toFloat(f64, mod); - const rhs_val = rhs.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @mod(lhs_val, rhs_val)); - }, - 80 => { - const lhs_val = lhs.toFloat(f80, mod); - const rhs_val = rhs.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val)); - }, - 128 => { - const lhs_val = lhs.toFloat(f128, mod); - const rhs_val = rhs.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @mod(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @mod(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @mod(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @mod(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @mod(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { @@ -4035,28 +3955,111 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatNegScalar(val, float_type, arena, mod); + return floatNegScalar(val, float_type, mod); } pub fn floatNegScalar( val: Value, float_type: Type, + mod: *Module, + ) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = -val.toFloat(f16, mod) }, + 32 => .{ .f32 = -val.toFloat(f32, mod) }, + 64 => .{ .f64 = -val.toFloat(f64, mod) }, + 80 => .{ .f80 = -val.toFloat(f80, mod) }, + 128 => .{ .f128 = -val.toFloat(f128, mod) }, + else => unreachable, + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); + } + + pub fn floatAdd( + lhs: Value, + rhs: Value, + float_type: Type, arena: Allocator, - mod: *const Module, + mod: *Module, + ) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + } + return Value.Tag.aggregate.create(arena, result_data); + } + return floatAddScalar(lhs, rhs, float_type, mod); + } + + pub fn floatAddScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, ) !Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16, mod)), - 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32, mod)), - 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64, mod)), - 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80, mod)), - 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128, mod)), + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) + rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) + rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) + rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) + rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) + rhs.toFloat(f128, mod) }, else => unreachable, + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); + } + + pub fn floatSub( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, + ) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + for (result_data, 0..) 
|*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + } + return Value.Tag.aggregate.create(arena, result_data); } + return floatSubScalar(lhs, rhs, float_type, mod); + } + + pub fn floatSubScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, + ) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) - rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) - rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) - rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) - rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) - rhs.toFloat(f128, mod) }, + else => unreachable, + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn floatDiv( @@ -4071,49 +4074,32 @@ pub const Value = struct { for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); + scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivScalar(lhs, rhs, float_type, arena, mod); + return floatDivScalar(lhs, rhs, float_type, mod); } pub fn floatDivScalar( lhs: Value, rhs: Value, float_type: Type, - arena: Allocator, - mod: *const Module, + mod: *Module, ) !Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16, mod); - const rhs_val = rhs.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, lhs_val / rhs_val); - }, - 32 => { - const lhs_val = lhs.toFloat(f32, mod); - const rhs_val = rhs.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, lhs_val / rhs_val); - }, - 64 => { - const lhs_val = lhs.toFloat(f64, mod); - const rhs_val = rhs.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, lhs_val / rhs_val); - }, - 80 => { - const lhs_val = lhs.toFloat(f80, mod); - const rhs_val = rhs.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, lhs_val / rhs_val); - }, - 128 => { - const lhs_val = lhs.toFloat(f128, mod); - const rhs_val = rhs.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, lhs_val / rhs_val); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) / rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) / rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) / rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) / rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) / rhs.toFloat(f128, mod) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn floatDivFloor( @@ -4128,49 +4114,32 @@ pub const Value = struct { for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); + scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivFloorScalar(lhs, rhs, float_type, arena, mod); + return floatDivFloorScalar(lhs, rhs, float_type, mod); } pub fn floatDivFloorScalar( lhs: Value, rhs: Value, float_type: Type, - arena: Allocator, - mod: *const Module, + mod: *Module, ) !Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16, mod); - const rhs_val = rhs.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val)); - }, - 32 => { - const lhs_val = lhs.toFloat(f32, mod); - const rhs_val = rhs.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val)); - }, - 64 => { - const lhs_val = lhs.toFloat(f64, mod); - const rhs_val = rhs.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val)); - }, - 80 => { - const lhs_val = lhs.toFloat(f80, mod); - const rhs_val = rhs.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @divFloor(lhs_val, rhs_val)); - }, - 128 => { - const lhs_val = lhs.toFloat(f128, mod); - const rhs_val = rhs.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @divFloor(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @divFloor(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @divFloor(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @divFloor(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @divFloor(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn floatDivTrunc( @@ -4185,49 +4154,32 @@ pub const Value = struct { for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); + scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivTruncScalar(lhs, rhs, float_type, arena, mod); + return floatDivTruncScalar(lhs, rhs, float_type, mod); } pub fn floatDivTruncScalar( lhs: Value, rhs: Value, float_type: Type, - arena: Allocator, - mod: *const Module, + mod: *Module, ) !Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16, mod); - const rhs_val = rhs.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val)); - }, - 32 => { - const lhs_val = lhs.toFloat(f32, mod); - const rhs_val = rhs.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val)); - }, - 64 => { - const lhs_val = lhs.toFloat(f64, mod); - const rhs_val = rhs.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val)); - }, - 80 => { - const lhs_val = lhs.toFloat(f80, mod); - const rhs_val = rhs.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @divTrunc(lhs_val, rhs_val)); - }, - 128 => { - const lhs_val = lhs.toFloat(f128, mod); - const rhs_val = rhs.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn floatMul( @@ -4242,49 +4194,32 @@ pub const Value = struct { for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); + scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatMulScalar(lhs, rhs, float_type, arena, mod); + return floatMulScalar(lhs, rhs, float_type, mod); } pub fn floatMulScalar( lhs: Value, rhs: Value, float_type: Type, - arena: Allocator, - mod: *const Module, + mod: *Module, ) !Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16, mod); - const rhs_val = rhs.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, lhs_val * rhs_val); - }, - 32 => { - const lhs_val = lhs.toFloat(f32, mod); - const rhs_val = rhs.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, lhs_val * rhs_val); - }, - 64 => { - const lhs_val = lhs.toFloat(f64, mod); - const rhs_val = rhs.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, lhs_val * rhs_val); - }, - 80 => { - const lhs_val = lhs.toFloat(f80, mod); - const rhs_val = rhs.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, lhs_val * rhs_val); - }, - 128 => { - const lhs_val = lhs.toFloat(f128, mod); - const rhs_val = rhs.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, lhs_val * rhs_val); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) * rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) * rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) * rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) * rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) * rhs.toFloat(f128, mod) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4292,38 +4227,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return sqrtScalar(val, float_type, arena, mod); + return sqrtScalar(val, float_type, mod); } - pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn sqrtScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @sqrt(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @sqrt(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @sqrt(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @sqrt(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @sqrt(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @sqrt(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @sqrt(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @sqrt(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @sqrt(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @sqrt(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4331,38 +4255,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return sinScalar(val, float_type, arena, mod); + return sinScalar(val, float_type, mod); } - pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn sinScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @sin(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @sin(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @sin(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @sin(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @sin(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @sin(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @sin(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @sin(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @sin(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @sin(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4370,38 +4283,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return cosScalar(val, float_type, arena, mod); + return cosScalar(val, float_type, mod); } - pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn cosScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @cos(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @cos(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @cos(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @cos(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @cos(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @cos(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @cos(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @cos(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @cos(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @cos(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4409,38 +4311,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return tanScalar(val, float_type, arena, mod); + return tanScalar(val, float_type, mod); } - pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn tanScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @tan(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @tan(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @tan(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @tan(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @tan(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @tan(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @tan(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @tan(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @tan(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @tan(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4448,38 +4339,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try expScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return expScalar(val, float_type, arena, mod); + return expScalar(val, float_type, mod); } - pub fn expScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn expScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @exp(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @exp(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @exp(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @exp(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @exp(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @exp(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @exp(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @exp(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @exp(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @exp(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4487,38 +4367,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return exp2Scalar(val, float_type, arena, mod); + return exp2Scalar(val, float_type, mod); } - pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn exp2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @exp2(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @exp2(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @exp2(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @exp2(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @exp2(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @exp2(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @exp2(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @exp2(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @exp2(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @exp2(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4526,38 +4395,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try logScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return logScalar(val, float_type, arena, mod); + return logScalar(val, float_type, mod); } - pub fn logScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn logScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @log(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @log(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @log(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @log(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @log(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @log(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @log(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @log(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @log(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @log(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4565,38 +4423,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return log2Scalar(val, float_type, arena, mod); + return log2Scalar(val, float_type, mod); } - pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn log2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @log2(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @log2(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @log2(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @log2(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @log2(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @log2(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @log2(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @log2(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @log2(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @log2(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4604,38 +4451,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return log10Scalar(val, float_type, arena, mod); + return log10Scalar(val, float_type, mod); } - pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn log10Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @log10(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @log10(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @log10(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @log10(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @log10(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @log10(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @log10(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @log10(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @log10(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @log10(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4643,38 +4479,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return fabsScalar(val, float_type, arena, mod); + return fabsScalar(val, float_type, mod); } - pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn fabsScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @fabs(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @fabs(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @fabs(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @fabs(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @fabs(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @fabs(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @fabs(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @fabs(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @fabs(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @fabs(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4682,38 +4507,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floorScalar(val, float_type, arena, mod); + return floorScalar(val, float_type, mod); } - pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn floorScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @floor(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @floor(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @floor(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @floor(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @floor(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @floor(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @floor(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @floor(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @floor(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @floor(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4721,38 +4535,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return ceilScalar(val, float_type, arena, mod); + return ceilScalar(val, float_type, mod); } - pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn ceilScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @ceil(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @ceil(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @ceil(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @ceil(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @ceil(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @ceil(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @ceil(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @ceil(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @ceil(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @ceil(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4760,38 +4563,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return roundScalar(val, float_type, arena, mod); + return roundScalar(val, float_type, mod); } - pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn roundScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @round(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @round(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @round(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @round(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @round(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @round(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @round(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @round(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @round(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @round(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { @@ -4799,38 +4591,27 @@ pub const Value = struct { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, mod); + scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), mod); } return Value.Tag.aggregate.create(arena, result_data); } - return truncScalar(val, float_type, arena, mod); + return truncScalar(val, float_type, mod); } - pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + pub fn truncScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const f = val.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @trunc(f)); - }, - 32 => { - const f = val.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @trunc(f)); - }, - 64 => { - const f = val.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @trunc(f)); - }, - 80 => { - const f = val.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @trunc(f)); - }, - 128 => { - const f = val.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @trunc(f)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @trunc(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @trunc(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @trunc(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @trunc(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @trunc(val.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } pub fn mulAdd( @@ -4852,13 +4633,12 @@ pub const Value = struct { mulend1_elem, mulend2_elem, addend_elem, - arena, mod, ); } return Value.Tag.aggregate.create(arena, result_data); } - return mulAddScalar(float_type, mulend1, mulend2, addend, arena, mod); + return mulAddScalar(float_type, mulend1, mulend2, addend, mod); } pub fn mulAddScalar( @@ -4866,43 +4646,21 @@ pub const Value = struct { mulend1: Value, mulend2: Value, addend: Value, - arena: Allocator, - mod: *const Module, + mod: *Module, ) Allocator.Error!Value { const target = mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const m1 = mulend1.toFloat(f16, mod); - const m2 = mulend2.toFloat(f16, mod); - const a = addend.toFloat(f16, mod); - return Value.Tag.float_16.create(arena, @mulAdd(f16, m1, m2, a)); - }, - 32 => { - const m1 = mulend1.toFloat(f32, mod); - const m2 = mulend2.toFloat(f32, mod); - const a = addend.toFloat(f32, mod); - return Value.Tag.float_32.create(arena, @mulAdd(f32, m1, m2, a)); - }, - 64 => { - const m1 = mulend1.toFloat(f64, mod); - const m2 = mulend2.toFloat(f64, mod); - const a = addend.toFloat(f64, mod); - return Value.Tag.float_64.create(arena, @mulAdd(f64, m1, m2, a)); - }, - 80 => { - const m1 = mulend1.toFloat(f80, mod); - const m2 = mulend2.toFloat(f80, mod); - const a = addend.toFloat(f80, mod); - return Value.Tag.float_80.create(arena, @mulAdd(f80, m1, m2, a)); - }, - 128 => { - const m1 = mulend1.toFloat(f128, mod); - const m2 = mulend2.toFloat(f128, mod); - const a = addend.toFloat(f128, mod); - return Value.Tag.float_128.create(arena, @mulAdd(f128, m1, m2, a)); - }, + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, mod), mulend2.toFloat(f16, mod), addend.toFloat(f16, mod)) }, + 32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, mod), mulend2.toFloat(f32, mod), 
addend.toFloat(f32, mod)) }, + 64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, mod), mulend2.toFloat(f64, mod), addend.toFloat(f64, mod)) }, + 80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, mod), mulend2.toFloat(f80, mod), addend.toFloat(f80, mod)) }, + 128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, mod), mulend2.toFloat(f128, mod), addend.toFloat(f128, mod)) }, else => unreachable, - } + }; + return (try mod.intern(.{ .float = .{ + .ty = float_type.ip_index, + .storage = storage, + } })).toValue(); } /// If the value is represented in-memory as a series of bytes that all @@ -5053,41 +4811,6 @@ pub const Value = struct { data: Type, }; - pub const Float_16 = struct { - pub const base_tag = Tag.float_16; - - base: Payload = .{ .tag = base_tag }, - data: f16, - }; - - pub const Float_32 = struct { - pub const base_tag = Tag.float_32; - - base: Payload = .{ .tag = base_tag }, - data: f32, - }; - - pub const Float_64 = struct { - pub const base_tag = Tag.float_64; - - base: Payload = .{ .tag = base_tag }, - data: f64, - }; - - pub const Float_80 = struct { - pub const base_tag = Tag.float_80; - - base: Payload = .{ .tag = base_tag }, - data: f80, - }; - - pub const Float_128 = struct { - pub const base_tag = Tag.float_128; - - base: Payload = .{ .tag = base_tag }, - data: f128, - }; - pub const Error = struct { base: Payload = .{ .tag = .@"error" }, data: struct { @@ -5152,7 +4875,6 @@ pub const Value = struct { pub const one_comptime_int: Value = .{ .ip_index = .one, .legacy = undefined }; pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one, .legacy = undefined }; pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; - pub const float_zero: Value = .{ .ip_index = .zero, .legacy = undefined }; // TODO: replace this! pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined }; From d89807efbb1bd5af0a92544298fc08ad6ba2d255 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 12 May 2023 10:59:04 -0700 Subject: [PATCH 064/205] stage2: remove legacy Type array and array_sentinel These are now handled by the InternPool. 
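As a minimal illustrative sketch (not part of the patch itself; it assumes a `mod: *Module`, an arena `Allocator`, and an element type `elem_ty` are in scope), the migration replaces the arena-allocated array payload with InternPool-backed construction:

    // Before: the array type was an arena-allocated Type payload, so every
    // construction needed an Allocator and produced a distinct object.
    const legacy_ty = try Type.Tag.array.create(arena, .{
        .len = 4,
        .elem_type = elem_ty,
    });

    // After: the array type is interned through the Module; structurally
    // equal array types resolve to the same InternPool index.
    const interned_ty = try mod.arrayType(.{
        .len = 4,
        .child = elem_ty.ip_index,
        .sentinel = .none,
    });

Because interned types are deduplicated and compared by index, the hand-written `eql` and hash cases for `.array`/`.array_sentinel` in src/type.zig become unnecessary and are deleted below.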
--- src/Module.zig | 5 +- src/Sema.zig | 52 ++++------- src/codegen/c.zig | 8 +- src/type.zig | 214 ++-------------------------------------------- 4 files changed, 32 insertions(+), 247 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index b32904e16557..426d27401150 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6543,9 +6543,10 @@ pub fn populateTestFunctions( const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ - .ty = try Type.Tag.array.create(arena, .{ + .ty = try mod.arrayType(.{ .len = test_fn_vals.len, - .elem_type = try tmp_test_fn_ty.copy(arena), + .child = tmp_test_fn_ty.ip_index, + .sentinel = .none, }), .val = try Value.Tag.aggregate.create(arena, test_fn_vals), }); diff --git a/src/Sema.zig b/src/Sema.zig index b5baf4dde392..9e89ca89ef94 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -16015,9 +16015,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const param_info_decl = mod.declPtr(param_info_decl_index); const param_ty = param_info_decl.val.toType(); const new_decl = try params_anon_decl.finish( - try Type.Tag.array.create(params_anon_decl.arena(), .{ + try mod.arrayType(.{ .len = param_vals.len, - .elem_type = try param_ty.copy(params_anon_decl.arena()), + .child = param_ty.ip_index, + .sentinel = .none, }), try Value.Tag.aggregate.create( params_anon_decl.arena(), @@ -16238,9 +16239,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our ?[]const Error value const errors_val = if (error_field_vals) |vals| v: { const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ + try mod.arrayType(.{ .len = vals.len, - .elem_type = error_field_ty, + .child = error_field_ty.ip_index, + .sentinel = .none, }), try Value.Tag.aggregate.create( fields_anon_decl.arena(), @@ -16332,9 +16334,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ + try mod.arrayType(.{ .len = enum_field_vals.len, - .elem_type = enum_field_ty, + .child = enum_field_ty.ip_index, + .sentinel = .none, }), try Value.Tag.aggregate.create( fields_anon_decl.arena(), @@ -16427,9 +16430,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ + try mod.arrayType(.{ .len = union_field_vals.len, - .elem_type = union_field_ty, + .child = union_field_ty.ip_index, + .sentinel = .none, }), try Value.Tag.aggregate.create( fields_anon_decl.arena(), @@ -16590,9 +16594,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ + try mod.arrayType(.{ .len = struct_field_vals.len, - .elem_type = struct_field_ty, + .child = struct_field_ty.ip_index, + .sentinel = .none, }), try Value.Tag.aggregate.create( fields_anon_decl.arena(), @@ -16707,9 +16712,10 @@ fn typeInfoDecls( } const new_decl = try decls_anon_decl.finish( - try Type.Tag.array.create(decls_anon_decl.arena(), .{ + try mod.arrayType(.{ .len = decl_vals.items.len, - .elem_type = declaration_ty, + .child = declaration_ty.ip_index, + .sentinel = .none, }), 
try Value.Tag.aggregate.create( decls_anon_decl.arena(), @@ -31574,10 +31580,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - .array, - .array_sentinel, - => return sema.resolveTypeRequiresComptime(ty.childType(mod)), - .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { @@ -32862,7 +32864,6 @@ fn generateUnionTagTypeNumbered( new_decl.name_fully_qualified = true; new_decl.owns_tv = true; new_decl.name_fully_qualified = true; - errdefer mod.abortAnonDecl(new_decl_index); const enum_ty = try mod.intern(.{ .enum_type = .{ .decl = new_decl_index, @@ -32875,7 +32876,6 @@ fn generateUnionTagTypeNumbered( .values = enum_field_vals, .tag_mode = .explicit, } }); - errdefer mod.intern_pool.remove(enum_ty); new_decl.val = enum_ty.toValue(); @@ -32924,12 +32924,10 @@ fn generateUnionTagTypeSimple( .values = &.{}, .tag_mode = .auto, } }); - errdefer mod.intern_pool.remove(enum_ty); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; new_decl.val = enum_ty.toValue(); - errdefer mod.abortAnonDecl(new_decl_index); return enum_ty.toType(); } @@ -33024,7 +33022,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set_merged, .error_union, .function, - .array_sentinel, .error_set_inferred, .anyframe_T, .pointer, @@ -33050,15 +33047,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return Value.empty_struct; }, - .array => { - if (ty.arrayLen(mod) == 0) - return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(ty.childType(mod))) != null) { - return Value.initTag(.the_only_possible_value); - } - return null; - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -33631,10 +33619,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - .array, - .array_sentinel, - => return sema.typeRequiresComptime(ty.childType(mod)), - .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 10a4856ad03c..c8e730354574 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -2467,11 +2467,11 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll(";\n"); } - var name_array_ty_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{ + const name_array_ty = try mod.arrayType(.{ .len = mod.error_name_list.items.len, - .elem_type = Type.const_slice_u8_sentinel_0, - } }; - const name_array_ty = Type.initPayload(&name_array_ty_pl.base); + .child = .const_slice_u8_sentinel_0_type, + .sentinel = .zero_u8, + }); try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete); diff --git a/src/type.zig b/src/type.zig index 2e4e9ca6fec7..8358f3678def 100644 --- a/src/type.zig +++ b/src/type.zig @@ -44,10 +44,6 @@ pub const Type = struct { .function => return .Fn, - .array, - .array_sentinel, - => return .Array, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -398,29 +394,6 @@ pub const Type = struct { return true; }, - .array, - .array_sentinel, - => { - if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false; - - if (a.arrayLen(mod) != b.arrayLen(mod)) - return false; - const elem_ty = a.childType(mod); - if (!elem_ty.eql(b.childType(mod), mod)) - return false; - const 
sentinel_a = a.sentinel(mod); - const sentinel_b = b.sentinel(mod); - if (sentinel_a) |sa| { - if (sentinel_b) |sb| { - return sa.eql(sb, elem_ty, mod); - } else { - return false; - } - } else { - return sentinel_b == null; - } - }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -627,17 +600,6 @@ pub const Type = struct { } }, - .array, - .array_sentinel, - => { - std.hash.autoHash(hasher, std.builtin.TypeId.Array); - - const elem_ty = ty.childType(mod); - std.hash.autoHash(hasher, ty.arrayLen(mod)); - hashWithHasher(elem_ty, hasher, mod); - hashSentinel(ty.sentinel(mod), elem_ty, hasher, mod); - }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -770,21 +732,6 @@ pub const Type = struct { }; }, - .array => { - const payload = self.castTag(.array).?.data; - return Tag.array.create(allocator, .{ - .len = payload.len, - .elem_type = try payload.elem_type.copy(allocator), - }); - }, - .array_sentinel => { - const payload = self.castTag(.array_sentinel).?.data; - return Tag.array_sentinel.create(allocator, .{ - .len = payload.len, - .sentinel = try payload.sentinel.copy(allocator), - .elem_type = try payload.elem_type.copy(allocator), - }); - }, .tuple => { const payload = self.castTag(.tuple).?.data; const types = try allocator.alloc(Type, payload.types.len); @@ -987,21 +934,6 @@ pub const Type = struct { ty = return_type; continue; }, - .array => { - const payload = ty.castTag(.array).?.data; - try writer.print("[{d}]", .{payload.len}); - ty = payload.elem_type; - continue; - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - try writer.print("[{d}:{}]", .{ - payload.len, - payload.sentinel.fmtDebug(), - }); - ty = payload.elem_type; - continue; - }, .tuple => { const tuple = ty.castTag(.tuple).?.data; try writer.writeAll("tuple{"); @@ -1198,19 +1130,6 @@ pub const Type = struct { try print(error_union.payload, writer, mod); }, - .array => { - const payload = ty.castTag(.array).?.data; - try writer.print("[{d}]", .{payload.len}); - try print(payload.elem_type, writer, mod); - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - try writer.print("[{d}:{}]", .{ - payload.len, - payload.sentinel.fmtValue(payload.elem_type, mod), - }); - try print(payload.elem_type, writer, mod); - }, .tuple => { const tuple = ty.castTag(.tuple).?.data; @@ -1522,10 +1441,6 @@ pub const Type = struct { } }, - .array => return ty.arrayLen(mod) != 0 and - try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) 
|field_ty, i| { @@ -1723,10 +1638,6 @@ pub const Type = struct { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - .array, - .array_sentinel, - => ty.childType(mod).hasWellDefinedLayout(mod), - .optional => ty.isPtrLikeOptional(mod), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -2005,8 +1916,6 @@ pub const Type = struct { .error_set_merged, => return AbiAlignmentAdvanced{ .scalar = 2 }, - .array, .array_sentinel => return ty.childType(mod).abiAlignmentAdvanced(mod, strat), - .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), @@ -2385,29 +2294,6 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, - .array => { - const payload = ty.castTag(.array).?.data; - switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size }, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - } - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size }, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - } - }, - .anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .pointer => switch (ty.castTag(.pointer).?.data.size) { @@ -2729,24 +2615,6 @@ pub const Type = struct { return total; }, - .array => { - const payload = ty.castTag(.array).?.data; - const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); - if (elem_size == 0 or payload.len == 0) - return @as(u64, 0); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return (payload.len - 1) * 8 * elem_size + elem_bit_size; - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - const elem_size = std.math.max( - payload.elem_type.abiAlignment(mod), - payload.elem_type.abiSize(mod), - ); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return payload.len * 8 * elem_size + elem_bit_size; - }, - .anyframe_T => return target.ptrBitWidth(), .pointer => switch (ty.castTag(.pointer).?.data.size) { @@ -3188,9 +3056,6 @@ pub const Type = struct { pub fn childTypeIp(ty: Type, ip: InternPool) Type { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .pointer => ty.castTag(.pointer).?.data.pointee_type, else => unreachable, @@ -3211,9 +3076,6 @@ pub const Type = struct { pub fn elemType2(ty: Type, mod: *const Module) Type { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .pointer => { const info = ty.castTag(.pointer).?.data; const child_ty = info.pointee_type; @@ -3483,8 +3345,6 @@ pub const Type = struct { return switch (ty.ip_index) { .empty_struct_type => 0, .none => switch (ty.tag()) { - .array => ty.castTag(.array).?.data.len, - .array_sentinel => 
ty.castTag(.array_sentinel).?.data.len, .tuple => ty.castTag(.tuple).?.data.types.len, .anon_struct => ty.castTag(.anon_struct).?.data.types.len, @@ -3524,12 +3384,9 @@ pub const Type = struct { pub fn sentinel(ty: Type, mod: *const Module) ?Value { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .array, - .tuple, - => null, + .tuple => null, .pointer => ty.castTag(.pointer).?.data.sentinel, - .array_sentinel => ty.castTag(.array_sentinel).?.data.sentinel, else => unreachable, }, @@ -3832,7 +3689,6 @@ pub const Type = struct { .error_set, .error_set_merged, .function, - .array_sentinel, .error_set_inferred, .anyframe_T, .pointer, @@ -3858,14 +3714,6 @@ pub const Type = struct { return Value.empty_struct; }, - .array => { - if (ty.arrayLen(mod) == 0) - return Value.initTag(.empty_array); - if ((try ty.childType(mod).onePossibleValue(mod)) != null) - return Value.initTag(.the_only_possible_value); - return null; - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -4034,10 +3882,6 @@ pub const Type = struct { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - .array, - .array_sentinel, - => return ty.childType(mod).comptimeOnly(mod), - .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { @@ -4804,8 +4648,6 @@ pub const Type = struct { inferred_alloc_const, // See last_no_payload_tag below. // After this, the tag requires a payload. - array, - array_sentinel, /// Possible Value tags for this: @"struct" tuple, /// Possible Value tags for this: @"struct" @@ -4838,8 +4680,6 @@ pub const Type = struct { .error_set_inferred => Payload.ErrorSetInferred, .error_set_merged => Payload.ErrorSetMerged, - .array => Payload.Array, - .array_sentinel => Payload.ArraySentinel, .pointer => Payload.Pointer, .function => Payload.Function, .error_union => Payload.ErrorUnion, @@ -4965,25 +4805,6 @@ pub const Type = struct { data: u64, }; - pub const Array = struct { - base: Payload, - data: struct { - len: u64, - elem_type: Type, - }, - }; - - pub const ArraySentinel = struct { - pub const base_tag = Tag.array_sentinel; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - len: u64, - sentinel: Value, - elem_type: Type, - }, - }; - pub const ElemType = struct { base: Payload, data: Type, @@ -5269,35 +5090,14 @@ pub const Type = struct { elem_type: Type, mod: *Module, ) Allocator.Error!Type { - if (elem_type.ip_index != .none) { - if (sent) |s| { - if (s.ip_index != .none) { - return mod.arrayType(.{ - .len = len, - .child = elem_type.ip_index, - .sentinel = s.ip_index, - }); - } - } else { - return mod.arrayType(.{ - .len = len, - .child = elem_type.ip_index, - .sentinel = .none, - }); - } - } - - if (sent) |some| { - return Tag.array_sentinel.create(arena, .{ - .len = len, - .sentinel = some, - .elem_type = elem_type, - }); - } + // TODO: update callsites of this function to directly call mod.arrayType + // and then delete this function. 
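As the TODO above suggests, a migrated callsite would call `mod.arrayType` directly. A
minimal sketch, assuming `mod: *Module` and the pre-interned indices this series defines
(`.u8_type`, `.zero_u8`):

    // `[4:0]u8`; pass `.sentinel = .none` for a plain `[4]u8`.
    const array_ty = try mod.arrayType(.{
        .len = 4,
        .child = .u8_type,
        .sentinel = .zero_u8,
    });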
+ _ = arena;
- return Tag.array.create(arena, .{
+ return mod.arrayType(.{
.len = len,
- .elem_type = elem_type,
+ .child = elem_type.ip_index,
+ .sentinel = if (sent) |s| s.ip_index else .none,
});
}

From 88dbd62bcbac24c09791a7838d2f08c2f540967a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 12 May 2023 16:22:37 -0700
Subject: [PATCH 065/205] stage2: move enum tag values into the InternPool

I'm seeing a new assertion trip: the call to `enumTagFieldIndex` in the
implementation of `@Type` is attempting to query the field index of a
union's enum tag, but the type of the enum tag value provided is not the
same as the union's tag type. Most likely this is a problem with type
coercion, since values are now typed.

Another problem is that I added some hacks in std.builtin because I
didn't see any convenient way to access those inner declarations from
Sema. That should definitely be cleaned up before merging this branch.

---
 lib/std/builtin.zig | 7 +
 src/Air.zig | 3 +
 src/InternPool.zig | 315 ++++++++++++++++-------
 src/Module.zig | 47 +++-
 src/Sema.zig | 487 ++++++++++++++++++------------------
 src/TypedValue.zig | 21 +-
 src/Zir.zig | 3 +
 src/arch/wasm/CodeGen.zig | 87 +++----
 src/arch/x86_64/CodeGen.zig | 12 +-
 src/codegen.zig | 28 +--
 src/codegen/c.zig | 56 ++---
 src/codegen/llvm.zig | 44 ++--
 src/codegen/spirv.zig | 8 +-
 src/link/Coff.zig | 2 +-
 src/link/Elf.zig | 2 +-
 src/link/MachO.zig | 2 +-
 src/link/Wasm.zig | 2 +-
 src/type.zig | 59 +++--
 src/value.zig | 254 ++++++++-----------
 19 files changed, 768 insertions(+), 671 deletions(-)

diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index b1f1406684b3..429654bd4a6b 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -223,6 +223,13 @@ pub const SourceLocation = struct {
pub const TypeId = std.meta.Tag(Type);
pub const TypeInfo = @compileError("deprecated; use Type");
+/// TODO this is a temporary alias because I don't see any handy methods in
+/// Sema for accessing inner declarations.
+pub const PtrSize = Type.Pointer.Size;
+/// TODO this is a temporary alias because I don't see any handy methods in
+/// Sema for accessing inner declarations.
+pub const TmpContainerLayoutAlias = Type.ContainerLayout;
+
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Type = union(enum) { diff --git a/src/Air.zig b/src/Air.zig index 8059b9e57f07..e82a70100f5a 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -845,6 +845,7 @@ pub const Inst = struct { pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), + u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -913,6 +914,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), + one_u5 = @enumToInt(InternPool.Index.one_u5), + four_u5 = @enumToInt(InternPool.Index.four_u5), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), diff --git a/src/InternPool.zig b/src/InternPool.zig index 2677fba45d1d..eace006d4cbb 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -144,6 +144,9 @@ pub const Key = union(enum) { opaque_type: OpaqueType, enum_type: EnumType, + /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented + /// via `simple_value` and has a named `Index` tag for it. + undef: Index, simple_value: SimpleValue, extern_func: struct { ty: Index, @@ -155,13 +158,12 @@ pub const Key = union(enum) { lib_name: u32, }, int: Key.Int, + /// A specific enum tag, indicated by the integer tag value. + enum_tag: Key.EnumTag, float: Key.Float, ptr: Ptr, opt: Opt, - enum_tag: struct { - ty: Index, - tag: BigIntConst, - }, + /// An instance of a struct, array, or vector. /// Each element/field stored as an `Index`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -284,21 +286,33 @@ pub const Key = union(enum) { }; /// Look up field index based on field name. - pub fn nameIndex(self: EnumType, ip: InternPool, name: NullTerminatedString) ?usize { + pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 { const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; - return map.getIndexAdapted(name, adapter); + const field_index = map.getIndexAdapted(name, adapter) orelse return null; + return @intCast(u32, field_index); } /// Look up field index based on tag value. /// Asserts that `values_map` is not `none`. /// This function returns `null` when `tag_val` does not have the /// integer tag type of the enum. - pub fn tagValueIndex(self: EnumType, ip: InternPool, tag_val: Index) ?usize { + pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 { assert(tag_val != .none); - const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)]; - const adapter: Index.Adapter = .{ .indexes = self.values }; - return map.getIndexAdapted(tag_val, adapter); + if (self.values_map.unwrap()) |values_map| { + const map = &ip.maps.items[@enumToInt(values_map)]; + const adapter: Index.Adapter = .{ .indexes = self.values }; + const field_index = map.getIndexAdapted(tag_val, adapter) orelse return null; + return @intCast(u32, field_index); + } + // Auto-numbered enum. Convert `tag_val` to field index. 
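// For example (a hypothetical `enum { a, b, c }`, i.e. `values.len == 0`):
//   int 1 stored as .u64           => field index 1 (field `b`)
//   int 7 stored as .u64           => null (only 3 fields)
//   int stored as .i64 or .big_int => null (out of range for any field)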
+ switch (ip.indexToKey(tag_val).int.storage) { + .u64 => |x| { + if (x >= self.names.len) return null; + return @intCast(u32, x); + }, + .i64, .big_int => return null, // out of range + } } }; @@ -362,6 +376,13 @@ pub const Key = union(enum) { }; }; + pub const EnumTag = struct { + /// The enum type. + ty: Index, + /// The integer tag value which has the integer tag type of the enum. + int: Index, + }; + pub const Float = struct { ty: Index, /// The storage used must match the size of the float type being represented. @@ -436,6 +457,8 @@ pub const Key = union(enum) { .struct_type, .union_type, .un, + .undef, + .enum_tag, => |info| std.hash.autoHash(hasher, info), .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), @@ -471,12 +494,6 @@ pub const Key = union(enum) { } }, - .enum_tag => |enum_tag| { - std.hash.autoHash(hasher, enum_tag.ty); - std.hash.autoHash(hasher, enum_tag.tag.positive); - for (enum_tag.tag.limbs) |limb| std.hash.autoHash(hasher, limb); - }, - .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); for (aggregate.fields) |field| std.hash.autoHash(hasher, field); @@ -522,6 +539,10 @@ pub const Key = union(enum) { const b_info = b.simple_value; return a_info == b_info; }, + .undef => |a_info| { + const b_info = b.undef; + return a_info == b_info; + }, .extern_func => |a_info| { const b_info = b.extern_func; return std.meta.eql(a_info, b_info); @@ -542,6 +563,10 @@ pub const Key = union(enum) { const b_info = b.un; return std.meta.eql(a_info, b_info); }, + .enum_tag => |a_info| { + const b_info = b.enum_tag; + return std.meta.eql(a_info, b_info); + }, .ptr => |a_info| { const b_info = b.ptr; @@ -612,13 +637,6 @@ pub const Key = union(enum) { }; }, - .enum_tag => |a_info| { - const b_info = b.enum_tag; - _ = a_info; - _ = b_info; - @panic("TODO"); - }, - .opaque_type => |a_info| { const b_info = b.opaque_type; return a_info.decl == b_info.decl; @@ -636,7 +654,7 @@ pub const Key = union(enum) { } pub fn typeOf(key: Key) Index { - switch (key) { + return switch (key) { .int_type, .ptr_type, .array_type, @@ -648,7 +666,7 @@ pub const Key = union(enum) { .union_type, .opaque_type, .enum_type, - => return .type_type, + => .type_type, inline .ptr, .int, @@ -658,18 +676,20 @@ pub const Key = union(enum) { .enum_tag, .aggregate, .un, - => |x| return x.ty, + => |x| x.ty, + + .undef => |x| x, .simple_value => |s| switch (s) { - .undefined => return .undefined_type, - .void => return .void_type, - .null => return .null_type, - .false, .true => return .bool_type, - .empty_struct => return .empty_struct_type, - .@"unreachable" => return .noreturn_type, + .undefined => .undefined_type, + .void => .void_type, + .null => .null_type, + .false, .true => .bool_type, + .empty_struct => .empty_struct_type, + .@"unreachable" => .noreturn_type, .generic_poison => unreachable, }, - } + }; } }; @@ -693,6 +713,7 @@ pub const Index = enum(u32) { pub const last_value: Index = .empty_struct; u1_type, + u5_type, u8_type, i8_type, u16_type, @@ -769,6 +790,10 @@ pub const Index = enum(u32) { one, /// `1` (usize) one_usize, + /// `1` (u5) + one_u5, + /// `4` (u5) + four_u5, /// `-1` (comptime_int) negative_one, /// `std.builtin.CallingConvention.C` @@ -834,6 +859,12 @@ pub const static_keys = [_]Key{ .bits = 1, } }, + // u5_type + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 5, + } }, + .{ .int_type = .{ .signedness = .unsigned, .bits = 8, @@ -1021,25 +1052,30 @@ pub const static_keys = [_]Key{ .storage = .{ .u64 = 1 }, } }, + // one_u5 + .{ .int = .{ + .ty = 
.u5_type, + .storage = .{ .u64 = 1 }, + } }, + // four_u5 + .{ .int = .{ + .ty = .u5_type, + .storage = .{ .u64 = 4 }, + } }, + // negative_one .{ .int = .{ .ty = .comptime_int_type, .storage = .{ .i64 = -1 }, } }, - + // calling_convention_c .{ .enum_tag = .{ .ty = .calling_convention_type, - .tag = .{ - .limbs = &.{@enumToInt(std.builtin.CallingConvention.C)}, - .positive = true, - }, + .int = .one_u5, } }, - + // calling_convention_inline .{ .enum_tag = .{ .ty = .calling_convention_type, - .tag = .{ - .limbs = &.{@enumToInt(std.builtin.CallingConvention.Inline)}, - .positive = true, - }, + .int = .four_u5, } }, .{ .simple_value = .void }, @@ -1118,6 +1154,10 @@ pub const Tag = enum(u8) { /// `data` is `Module.Union.Index`. type_union_safety, + /// Typed `undefined`. + /// `data` is `Index` of the type. + /// Untyped `undefined` is stored instead via `simple_value`. + undef, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, @@ -1132,7 +1172,7 @@ pub const Tag = enum(u8) { /// already contains the optional type corresponding to this payload. opt_payload, /// An optional value that is null. - /// data is Index of the payload type. + /// data is Index of the optional type. opt_null, /// Type: u8 /// data is integer value @@ -1155,18 +1195,18 @@ pub const Tag = enum(u8) { /// A comptime_int that fits in an i32. /// data is integer value bitcasted to u32. int_comptime_int_i32, + /// An integer value that fits in 32 bits with an explicitly provided type. + /// data is extra index of `IntSmall`. + int_small, /// A positive integer value. - /// data is a limbs index to Int. + /// data is a limbs index to `Int`. int_positive, /// A negative integer value. - /// data is a limbs index to Int. + /// data is a limbs index to `Int`. int_negative, - /// An enum tag identified by a positive integer value. - /// data is a limbs index to Int. - enum_tag_positive, - /// An enum tag identified by a negative integer value. - /// data is a limbs index to Int. - enum_tag_negative, + /// An enum tag value. + /// data is extra index of `Key.EnumTag`. + enum_tag, /// An f16 value. /// data is float value bitcasted to u16 and zero-extended. float_f16, @@ -1404,6 +1444,11 @@ pub const Int = struct { limbs_len: u32, }; +pub const IntSmall = struct { + ty: Index, + value: u32, +}; + /// A f64 value, broken up into 2 u32 parts. pub const Float64 = struct { piece0: u32, @@ -1479,15 +1524,28 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { try ip.items.ensureUnusedCapacity(gpa, static_keys.len); try ip.map.ensureUnusedCapacity(gpa, static_keys.len); try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); - try ip.limbs.ensureUnusedCapacity(gpa, 2); // This inserts all the statically-known values into the intern pool in the // order expected. for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable; - // Sanity check. - assert(ip.indexToKey(.bool_true).simple_value == .true); - assert(ip.indexToKey(.bool_false).simple_value == .false); + if (std.debug.runtime_safety) { + // Sanity check. 
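// The constants above encode std.builtin.CallingConvention: `.C` has tag
// value 1, `.Inline` has tag value 4, and the enum's auto tag type is u5,
// which is what the pre-interned `one_u5` and `four_u5` keys are for; the
// asserts below verify exactly this correspondence.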
+ assert(ip.indexToKey(.bool_true).simple_value == .true); + assert(ip.indexToKey(.bool_false).simple_value == .false); + + const cc_inline = ip.indexToKey(.calling_convention_inline).enum_tag.int; + const cc_c = ip.indexToKey(.calling_convention_c).enum_tag.int; + + assert(ip.indexToKey(cc_inline).int.storage.u64 == + @enumToInt(std.builtin.CallingConvention.Inline)); + + assert(ip.indexToKey(cc_c).int.storage.u64 == + @enumToInt(std.builtin.CallingConvention.C)); + + assert(ip.indexToKey(ip.typeOf(cc_inline)).int_type.bits == + @typeInfo(@typeInfo(std.builtin.CallingConvention).Enum.tag_type).Int.bits); + } assert(ip.items.len == static_keys.len); } @@ -1634,6 +1692,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_enum_explicit => indexToKeyEnum(ip, data, .explicit), .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive), + .undef => .{ .undef = @intToEnum(Index, data) }, .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, @@ -1687,8 +1746,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }, .int_positive => indexToKeyBigInt(ip, data, true), .int_negative => indexToKeyBigInt(ip, data, false), - .enum_tag_positive => @panic("TODO"), - .enum_tag_negative => @panic("TODO"), + .int_small => { + const info = ip.extraData(IntSmall, data); + return .{ .int = .{ + .ty = info.ty, + .storage = .{ .u64 = info.value }, + } }; + }, .float_f16 => .{ .float = .{ .ty = .f16_type, .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, @@ -1734,6 +1798,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }; }, .union_value => .{ .un = ip.extraData(Key.Union, data) }, + .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, }; } @@ -1896,6 +1961,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(simple_value), }); }, + .undef => |ty| { + assert(ty != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .undef, + .data = @enumToInt(ty), + }); + }, .struct_type => |struct_type| { ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ @@ -2112,10 +2184,32 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } switch (int.storage) { .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small, + .data = try ip.addExtra(gpa, IntSmall{ + .ty = int.ty, + .value = casted, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } else |_| {} + const tag: Tag = if (big_int.positive) .int_positive else .int_negative; try addInt(ip, gpa, int.ty, tag, big_int.limbs); }, - inline .i64, .u64 => |x| { + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small, + .data = try ip.addExtra(gpa, IntSmall{ + .ty = int.ty, + .value = casted, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + var buf: [2]Limb = undefined; const big_int = BigIntMutable.init(&buf, x).toConst(); const tag: Tag = if (big_int.positive) .int_positive else .int_negative; @@ -2124,6 +2218,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, + .enum_tag => |enum_tag| { + assert(enum_tag.ty != .none); + assert(enum_tag.int != .none); + + ip.items.appendAssumeCapacity(.{ + .tag = .enum_tag, + .data = try ip.addExtra(gpa, enum_tag), + }); + }, + .float => |float| { switch (float.ty) { .f16_type => ip.items.appendAssumeCapacity(.{ @@ -2164,11 +2268,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) 
Allocator.Error!Index { } }, - .enum_tag => |enum_tag| { - const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative; - try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs); - }, - .aggregate => |aggregate| { if (aggregate.fields.len == 0) { ip.items.appendAssumeCapacity(.{ @@ -2671,44 +2770,59 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index { /// Given an existing value, returns the same value but with the supplied type. /// Only some combinations are allowed: -/// * int to int +/// * int <=> int +/// * int <=> enum pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { switch (ip.indexToKey(val)) { - .int => |int| { - // The key cannot be passed directly to `get`, otherwise in the case of - // big_int storage, the limbs would be invalidated before they are read. - // Here we pre-reserve the limbs to ensure that the logic in `addInt` will - // not use an invalidated limbs pointer. - switch (int.storage) { - .u64 => |x| return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .u64 = x }, - } }), - .i64 => |x| return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .i64 = x }, - } }), - - .big_int => |big_int| { - const positive = big_int.positive; - const limbs = ip.limbsSliceToIndex(big_int.limbs); - // This line invalidates the limbs slice, but the indexes computed in the - // previous line are still correct. - try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); - return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .big_int = .{ - .limbs = ip.limbsIndexToSlice(limbs), - .positive = positive, - } }, - } }); - }, - } + .int => |int| switch (ip.indexToKey(new_ty)) { + .enum_type => return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = val, + } }), + else => return getCoercedInts(ip, gpa, int, new_ty), + }, + .enum_tag => |enum_tag| { + // Assume new_ty is an integer type. + return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty); }, else => unreachable, } } +/// Asserts `val` has an integer type. +/// Assumes `new_ty` is an integer type. +pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index { + // The key cannot be passed directly to `get`, otherwise in the case of + // big_int storage, the limbs would be invalidated before they are read. + // Here we pre-reserve the limbs to ensure that the logic in `addInt` will + // not use an invalidated limbs pointer. + switch (int.storage) { + .u64 => |x| return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .u64 = x }, + } }), + .i64 => |x| return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .i64 = x }, + } }), + + .big_int => |big_int| { + const positive = big_int.positive; + const limbs = ip.limbsSliceToIndex(big_int.limbs); + // This line invalidates the limbs slice, but the indexes computed in the + // previous line are still correct. 
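// (reserveLimbs may grow ip.limbs and reallocate its buffer, which is what
// would dangle big_int.limbs; limbsIndexToSlice re-derives a valid slice
// from the stable indexes afterwards.)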
+ try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); + return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .big_int = .{ + .limbs = ip.limbsIndexToSlice(limbs), + .positive = positive, + } }, + } }); + }, + } +} + pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex { const tags = ip.items.items(.tag); if (val == .none) return .none; @@ -2805,6 +2919,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_union_safety, => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + .undef => 0, .simple_type => 0, .simple_value => 0, .ptr_int => @sizeOf(PtrInt), @@ -2817,15 +2932,15 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .int_usize => 0, .int_comptime_int_u32 => 0, .int_comptime_int_i32 => 0, + .int_small => @sizeOf(IntSmall), .int_positive, .int_negative, - .enum_tag_positive, - .enum_tag_negative, => b: { const int = ip.limbData(Int, data); break :b @sizeOf(Int) + int.limbs_len * 8; }, + .enum_tag => @sizeOf(Key.EnumTag), .float_f16 => 0, .float_f32 => 0, @@ -2958,3 +3073,9 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { pub fn typeOf(ip: InternPool, index: Index) Index { return ip.indexToKey(index).typeOf(); } + +/// Assumes that the enum's field indexes equal its value tags. +pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E { + const int = ip.indexToKey(i).enum_tag.int; + return @intToEnum(E, ip.indexToKey(int).int.storage.u64); +} diff --git a/src/Module.zig b/src/Module.zig index 426d27401150..cf1fea3444c9 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6896,6 +6896,43 @@ pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value return i.toValue(); } +/// Creates an enum tag value based on the integer tag value. +pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value { + if (std.debug.runtime_safety) { + const tag = ty.zigTypeTag(mod); + assert(tag == .Enum); + } + const i = try intern(mod, .{ .enum_tag = .{ + .ty = ty.ip_index, + .int = tag_int, + } }); + return i.toValue(); +} + +/// Creates an enum tag value based on the field index according to source code +/// declaration order. +pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value { + const ip = &mod.intern_pool; + const gpa = mod.gpa; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; + + if (enum_type.values.len == 0) { + // Auto-numbered fields. + return (try ip.get(gpa, .{ .enum_tag = .{ + .ty = ty.ip_index, + .int = try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = field_index }, + } }), + } })).toValue(); + } + + return (try ip.get(gpa, .{ .enum_tag = .{ + .ty = ty.ip_index, + .int = enum_type.values[field_index], + } })).toValue(); +} + pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { if (std.debug.runtime_safety) { const tag = ty.zigTypeTag(mod); @@ -6967,8 +7004,8 @@ pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { /// `max`. Asserts that neither value is undef. 
/// TODO: if #3806 is implemented, this becomes trivial pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { - assert(!min.isUndef()); - assert(!max.isUndef()); + assert(!min.isUndef(mod)); + assert(!max.isUndef(mod)); if (std.debug.runtime_safety) { assert(Value.order(min, max, mod).compare(.lte)); @@ -6990,7 +7027,7 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { /// twos-complement integer; otherwise in an unsigned integer. /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { - assert(!val.isUndef()); + assert(!val.isUndef(mod)); const key = mod.intern_pool.indexToKey(val.ip_index); switch (key.int.storage) { @@ -7193,3 +7230,7 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu return owner_decl.srcLoc(mod); } } + +pub fn toEnum(mod: *Module, comptime E: type, val: Value) E { + return mod.intern_pool.toEnum(E, val.ip_index); +} diff --git a/src/Sema.zig b/src/Sema.zig index 9e89ca89ef94..2fc364ebd78f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1904,8 +1904,9 @@ fn resolveDefinedValue( src: LazySrcLoc, air_ref: Air.Inst.Ref, ) CompileError!?Value { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(air_ref)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { if (block.is_typeof) return null; return sema.failWithUseOfUndef(block, src); } @@ -4333,7 +4334,7 @@ fn validateUnionInit( const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); if (init_val) |val| { // Our task is to delete all the `field_ptr` and `store` instructions, and insert @@ -4832,7 +4833,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const elem_ty = operand_ty.elemType2(mod); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.fail(block, src, "cannot dereference undefined value", .{}); } } else if (!(try sema.validateRunTimeType(elem_ty, false))) { @@ -6194,15 +6195,16 @@ fn lookupInNamespace( } fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { + const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; - if (func_val.isUndef()) return null; + if (func_val.isUndef(mod)) return null; const owner_decl_index = switch (func_val.tag()) { .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl, .function => func_val.castTag(.function).?.data.owner_decl, - .decl_ref => sema.mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, + .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, else => return null, }; - return sema.mod.declPtr(owner_decl_index); + return mod.declPtr(owner_decl_index); } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { @@ -8106,7 +8108,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } switch (val.tag()) { @@ -8326,7 +8328,7 @@ fn 
zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; return sema.failWithOwnedErrorMsg(msg); } - if (int_val.isUndef()) { + if (int_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, operand_src); } if (!(try sema.enumHasInt(dest_ty, int_val))) { @@ -11472,7 +11474,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (f != null) continue; cases_len += 1; - const item_val = try Value.Tag.enum_field_index.create(sema.arena, @intCast(u32, i)); + const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i)); const item_ref = try sema.addConstant(operand_ty, item_val); case_block.inline_case_capture = item_ref; @@ -12208,7 +12210,7 @@ fn zirShl( const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs); if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(sema.typeOf(lhs)); } // If rhs is 0, return lhs without doing any calculations. @@ -12255,7 +12257,7 @@ fn zirShl( } const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { - if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); + if (lhs_val.isUndef(mod)) return sema.addConstUndef(lhs_ty); const rhs_val = maybe_rhs_val orelse { if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); @@ -12389,7 +12391,7 @@ fn zirShr( const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs); const runtime_src = if (maybe_rhs_val) |rhs_val| rs: { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(lhs_ty); } // If rhs is 0, return lhs without doing any calculations. @@ -12434,7 +12436,7 @@ fn zirShr( }); } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(lhs_ty); } if (air_tag == .shr_exact) { @@ -12578,7 +12580,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); @@ -13154,7 +13156,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (rhs_scalar_ty.isAnyFloat()) { // We handle float negation here to ensure negative zero is represented in the bits. 
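For reference, the IEEE-754 behavior that special case preserves, written as a
user-level test (a sketch; uses the two-argument `@bitCast` of this era):

    const std = @import("std");

    test "negating zero flips the sign bit" {
        const x: f32 = 0.0;
        // `-x` must produce -0.0 (sign bit set) ...
        try std.testing.expect(@bitCast(u32, -x) == 0x8000_0000);
        // ... whereas `0.0 - x` rounds to +0.0 and would lose the sign.
        try std.testing.expect(@bitCast(u32, 0.0 - x) == 0x0000_0000);
    }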
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(rhs_ty); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(rhs_ty); return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod)); } try sema.requireRuntimeBlock(block, src, null); @@ -13297,7 +13299,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins switch (scalar_tag) { .Int, .ComptimeInt, .ComptimeFloat => { if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), @@ -13312,7 +13314,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13326,7 +13328,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const runtime_src = rs: { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { @@ -13434,7 +13436,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, compile error because there is a possible // value for which the division would result in a remainder. if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -13451,7 +13453,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13611,7 +13613,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. 
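// The same policy recurs across zirDiv, zirDivExact, zirDivFloor, and
// zirDivTrunc: a comptime-known divisor that is undefined or zero is always
// a compile error, while an undefined dividend folds to undef only when no
// illegal behavior is possible (for signed integers that means ruling out
// the INT_MIN / -1 overflow, and @divExact never tolerates an undefined
// dividend because a nonzero remainder would be illegal behavior).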
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), @@ -13626,7 +13628,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13635,7 +13637,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // TODO: if the RHS is one, return the LHS directly } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { @@ -13732,7 +13734,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), @@ -13747,7 +13749,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13755,7 +13757,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { @@ -13977,7 +13979,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // then emit a compile error saying you have to pick one. if (is_int) { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -13995,7 +13997,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -14024,7 +14026,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } // float operands if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -14034,7 +14036,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef() or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+ if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
}
return sema.addConstant(
@@ -14155,12 +14157,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
// If the lhs is undefined, result is undefined.
if (is_int) {
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14179,7 +14181,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
// float operands
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14187,7 +14189,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
@@ -14257,12 +14259,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
// If the lhs is undefined, result is undefined.
if (is_int) {
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, lhs_src);
}
}
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14281,7 +14283,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
// float operands
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
return sema.failWithUseOfUndef(block, rhs_src);
}
if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14289,7 +14291,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
}
if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
return sema.addConstUndef(resolved_type);
}
if (maybe_rhs_val) |rhs_val| {
@@ -14372,18 +14374,18 @@ fn zirOverflowArithmetic(
// to the result, even if it is undefined.
// Otherwise, if either of the arguments is undefined, undefined is returned.
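As a user-level illustration of that folding (a sketch, assuming the
tuple-returning overflow builtins of this era):

    const std = @import("std");

    test "adding a comptime-known zero cannot overflow" {
        const r = @addWithOverflow(@as(u8, 255), 0);
        try std.testing.expectEqual(@as(u8, 255), r[0]); // the other operand, unchanged
        try std.testing.expectEqual(@as(u1, 0), r[1]); // overflow bit known to be 0
    }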
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
}
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+ if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
}
}
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
@@ -14396,12 +14398,12 @@ fn zirOverflowArithmetic(
// If the rhs is zero, then the result is lhs and no overflow occurred.
// Otherwise, if either result is undefined, both results are undefined.
if (maybe_rhs_val) |rhs_val| {
- if (rhs_val.isUndef()) {
+ if (rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
} else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
} else if (maybe_lhs_val) |lhs_val| {
- if (lhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
@@ -14416,7 +14418,7 @@ fn zirOverflowArithmetic(
// Otherwise, if either of the arguments is undefined, both results are undefined.
const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
if (maybe_lhs_val) |lhs_val| {
- if (!lhs_val.isUndef()) {
+ if (!lhs_val.isUndef(mod)) {
if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs };
} else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
@@ -14426,7 +14428,7 @@ fn zirOverflowArithmetic(
}
if (maybe_rhs_val) |rhs_val| {
- if (!rhs_val.isUndef()) {
+ if (!rhs_val.isUndef(mod)) {
if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs };
} else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) {
@@ -14437,7 +14439,7 @@ fn zirOverflowArithmetic(
if (maybe_lhs_val) |lhs_val| {
if (maybe_rhs_val) |rhs_val| {
- if (lhs_val.isUndef() or rhs_val.isUndef()) {
+ if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
}
@@ -14451,18 +14453,18 @@ fn zirOverflowArithmetic(
// If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
// Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } @@ -14606,12 +14608,12 @@ fn analyzeArithmetic( // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { @@ -14624,7 +14626,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14653,13 +14655,13 @@ fn analyzeArithmetic( // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .addwrap_optimized else .addwrap; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14678,12 +14680,12 @@ fn analyzeArithmetic( // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14708,7 +14710,7 @@ fn analyzeArithmetic( // overflow, causing illegal behavior. // For floats: either operand being undef makes the result undef. 
if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { @@ -14721,7 +14723,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14750,7 +14752,7 @@ fn analyzeArithmetic( // If the RHS is zero, then the other operand is returned, even if it is undefined. // If either of the operands are undefined, the result is undefined. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14759,7 +14761,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .subwrap_optimized else .subwrap; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { @@ -14775,7 +14777,7 @@ fn analyzeArithmetic( // If the RHS is zero, result is LHS. // If either of the operands are undefined, result is undefined. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14783,7 +14785,7 @@ fn analyzeArithmetic( } } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { @@ -14814,7 +14816,7 @@ fn analyzeArithmetic( else => unreachable, }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (lhs_val.isNan(mod)) { return sema.addConstant(resolved_type, lhs_val); } @@ -14844,7 +14846,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { @@ -14874,7 +14876,7 @@ fn analyzeArithmetic( return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14908,7 +14910,7 @@ fn analyzeArithmetic( else => unreachable, }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); @@ -14922,7 +14924,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mulwrap_optimized else .mulwrap; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14935,7 +14937,7 @@ fn analyzeArithmetic( return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } return sema.addConstant( @@ -14956,7 +14958,7 @@ fn analyzeArithmetic( else => unreachable, }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try 
lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); @@ -14969,7 +14971,7 @@ fn analyzeArithmetic( } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14982,7 +14984,7 @@ fn analyzeArithmetic( return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } @@ -15100,7 +15102,7 @@ fn analyzePtrArithmetic( const runtime_src = rs: { if (opt_ptr_val) |ptr_val| { if (opt_off_val) |offset_val| { - if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty); + if (ptr_val.isUndef(mod)) return sema.addConstUndef(new_ptr_ty); const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod)); if (offset_int == 0) return ptr; @@ -15363,7 +15365,7 @@ fn zirCmpEq( const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(lhs)) |lval| { if (try sema.resolveMaybeUndefVal(rhs)) |rval| { - if (lval.isUndef() or rval.isUndef()) { + if (lval.isUndef(mod) or rval.isUndef(mod)) { return sema.addConstUndef(Type.bool); } // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, @@ -15425,7 +15427,7 @@ fn analyzeCmpUnionTag( const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| { - if (enum_val.isUndef()) return sema.addConstUndef(Type.bool); + if (enum_val.isUndef(mod)) return sema.addConstUndef(Type.bool); const field_ty = union_ty.unionFieldType(enum_val, sema.mod); if (field_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; @@ -15527,9 +15529,9 @@ fn cmpSelf( const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { - if (lhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (resolved_type.zigTypeTag(mod) == .Vector) { const result_ty = try mod.vectorType(.{ @@ -15557,7 +15559,7 @@ fn cmpSelf( // bool eq/neq more efficiently. 
if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(mod), lhs_src); } } @@ -15892,68 +15894,69 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); const type_info_ty = try sema.getBuiltinType("Type"); + const type_info_tag_ty = type_info_ty.unionTagType(mod).?; switch (ty.zigTypeTag(mod)) { .Type => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Type)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Type)), .val = Value.void, }), ), .Void => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Void)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Void)), .val = Value.void, }), ), .Bool => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Bool)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)), .val = Value.void, }), ), .NoReturn => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.NoReturn)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.NoReturn)), .val = Value.void, }), ), .ComptimeFloat => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeFloat)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeFloat)), .val = Value.void, }), ), .ComptimeInt => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeInt)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeInt)), .val = Value.void, }), ), .Undefined => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Undefined)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Undefined)), .val = Value.void, }), ), .Null => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Null)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Null)), .val = Value.void, }), ), .EnumLiteral => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.EnumLiteral)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.EnumLiteral)), .val = Value.void, }), ), @@ 
-16040,10 +16043,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else Value.null; + const callconv_ty = try sema.getBuiltinType("CallingConvention"); + const field_values = try sema.arena.create([6]Value); field_values.* = .{ // calling_convention: CallingConvention, - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)), + try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc)), // alignment: comptime_int, try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)), // is_generic: bool, @@ -16059,26 +16064,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Fn)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); }, .Int => { + const signedness_ty = try sema.getBuiltinType("Signedness"); const info = ty.intInfo(mod); const field_values = try sema.arena.alloc(Value, 2); // signedness: Signedness, - field_values[0] = try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(info.signedness), - ); + field_values[0] = try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness)); // bits: u16, field_values[1] = try mod.intValue(Type.u16, info.bits); return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Int)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16091,7 +16094,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Float)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16103,10 +16106,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try info.pointee_type.lazyAbiAlignment(mod, sema.arena); + const addrspace_ty = try sema.getBuiltinType("AddressSpace"); + const ptr_size_ty = try sema.getBuiltinType("PtrSize"); + const field_values = try sema.arena.create([8]Value); field_values.* = .{ // size: Size, - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size)), + try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size)), // is_const: bool, Value.makeBool(!info.mutable), // is_volatile: bool, @@ -16114,7 +16120,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // alignment: comptime_int, alignment, // address_space: AddressSpace - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace")), + try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace")), // child: type, try Value.Tag.ty.create(sema.arena, info.pointee_type), // is_allowzero: bool, @@ -16126,7 +16132,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, 
@enumToInt(std.builtin.TypeId.Pointer)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16144,7 +16150,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Array)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16160,7 +16166,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Vector)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16173,7 +16179,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Optional)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16263,7 +16269,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorSet)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet)), .val = errors_val, }), ); @@ -16278,7 +16284,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorUnion)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16365,7 +16371,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Enum)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16454,13 +16460,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); } else Value.null; + const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias"); + const field_values = try sema.arena.create([4]Value); field_values.* = .{ // layout: ContainerLayout, - try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(layout), - ), + try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), // tag_type: ?type, enum_tag_ty_val, @@ -16473,7 +16478,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Union)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16625,13 +16630,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }; + const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias"); + const field_values = try sema.arena.create([5]Value); field_values.* = .{ // layout: ContainerLayout, - try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(layout), - ), + try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16645,7 +16649,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Struct)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16665,7 +16669,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Opaque)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16912,7 +16916,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(operand)) |val| { - return if (val.isUndef()) + return if (val.isUndef(mod)) sema.addConstUndef(Type.bool) else if (val.toBool(mod)) Air.Inst.Ref.bool_false @@ -17879,7 +17883,7 @@ fn unionInit( if (try sema.resolveMaybeUndefVal(init)) |init_val| { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = init_val, @@ -17980,7 +17984,7 @@ fn zirStructInit( const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); const init_inst = try sema.resolveInst(item.data.init); if (try sema.resolveMaybeUndefVal(init_inst)) |val| { @@ -18614,7 +18618,7 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return 
sema.addConstUndef(Type.u1); + if (val.isUndef(mod)) return sema.addConstUndef(Type.u1); if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } @@ -18673,7 +18677,7 @@ fn zirUnaryMath( .child = scalar_ty.ip_index, }); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); const elems = try sema.arena.alloc(Value, vec_len); @@ -18692,7 +18696,7 @@ fn zirUnaryMath( }, .ComptimeFloat, .Float => { if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - if (operand_val.isUndef()) + if (operand_val.isUndef(mod)) return sema.addConstUndef(operand_ty); const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod); return sema.addConstant(operand_ty, result_val); @@ -18809,7 +18813,7 @@ fn zirReify( const signedness_val = struct_val[0]; const bits_val = struct_val[1]; - const signedness = signedness_val.toEnum(std.builtin.Signedness); + const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = try mod.intType(signedness, bits); return sema.addType(ty); @@ -18874,7 +18878,7 @@ fn zirReify( break :t elem_ty; }; - const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size); + const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); var actual_sentinel: ?Value = null; if (!sentinel_val.isNull(mod)) { @@ -18927,7 +18931,7 @@ fn zirReify( .mutable = !is_const_val.toBool(mod), .@"volatile" = is_volatile_val.toBool(mod), .@"align" = abi_align, - .@"addrspace" = address_space_val.toEnum(std.builtin.AddressSpace), + .@"addrspace" = mod.toEnum(std.builtin.AddressSpace, address_space_val), .pointee_type = try elem_ty.copy(sema.arena), .@"allowzero" = is_allowzero_val.toBool(mod), .sentinel = actual_sentinel, @@ -19033,7 +19037,7 @@ fn zirReify( const is_tuple_val = struct_val[4]; assert(struct_val.len == 5); - const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout); + const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19208,7 +19212,7 @@ fn zirReify( if (decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified unions must have no decls", .{}); } - const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout); + const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); @@ -19309,7 +19313,7 @@ fn zirReify( } if (explicit_enum_info) |tag_info| { - const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse { + const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) }); errdefer msg.destroy(gpa); @@ -19402,7 +19406,7 @@ fn zirReify( const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // calling_convention: CallingConvention, - const cc = struct_val[0].toEnum(std.builtin.CallingConvention); + const cc = mod.toEnum(std.builtin.CallingConvention, struct_val[0]); // alignment: comptime_int, const alignment_val = struct_val[1]; // is_generic: bool, @@ -20180,7 +20184,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } if (try 
sema.resolveMaybeUndefVal(ptr)) |operand_val| { - if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef()) { + if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { @@ -20315,7 +20319,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (try sema.resolveMaybeUndefValIntable(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(dest_ty); + if (val.isUndef(mod)) return sema.addConstUndef(dest_ty); if (!is_vector) { return sema.addConstant( dest_ty, @@ -20419,7 +20423,7 @@ fn zirBitCount( .child = result_scalar_ty.ip_index, }); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_ty); + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); const elems = try sema.arena.alloc(Value, vec_len); const scalar_ty = operand_ty.scalarType(mod); @@ -20439,7 +20443,7 @@ fn zirBitCount( }, .Int => { if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_scalar_ty); + if (val.isUndef(mod)) return sema.addConstUndef(result_scalar_ty); try sema.resolveLazyValue(val); return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod)); } else { @@ -20476,7 +20480,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(operand_ty); + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); const result_val = try val.byteSwap(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20486,7 +20490,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Vector => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); @@ -20524,7 +20528,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(operand_ty); + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); const result_val = try val.bitReverse(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20534,7 +20538,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
}, .Vector => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); @@ -21072,7 +21076,7 @@ fn resolveExportOptions( const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known"); - const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage); + const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); const section_operand = try sema.fieldVal(block, src, options, "section", section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); @@ -21084,7 +21088,7 @@ fn resolveExportOptions( const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src); const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known"); - const visibility = visibility_val.toEnum(std.builtin.SymbolVisibility); + const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val); if (name.len < 1) { return sema.fail(block, name_src, "exported symbol name cannot be empty", .{}); @@ -21112,11 +21116,12 @@ fn resolveBuiltinEnum( comptime name: []const u8, reason: []const u8, ) CompileError!@field(std.builtin, name) { + const mod = sema.mod; const ty = try sema.getBuiltinType(name); const air_ref = try sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced, reason); - return val.toEnum(@field(std.builtin, name)); + return mod.toEnum(@field(std.builtin, name), val); } fn resolveAtomicOrder( @@ -21198,7 +21203,7 @@ fn zirCmpxchg( const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { if (try sema.resolveMaybeUndefVal(expected_value)) |expected_val| { if (try sema.resolveMaybeUndefVal(new_value)) |new_val| { - if (expected_val.isUndef() or new_val.isUndef()) { + if (expected_val.isUndef(mod) or new_val.isUndef(mod)) { // TODO: this should probably cause the memory stored at the pointer // to become undef as well return sema.addConstUndef(result_ty); @@ -21248,7 +21253,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I .child = scalar_ty.ip_index, }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { - if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty); + if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty); return sema.addConstant( vector_ty, @@ -21300,7 +21305,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty); + if (operand_val.isUndef(mod)) return sema.addConstUndef(scalar_ty); var accum: Value = try operand_val.elemValue(mod, 0); var i: u32 = 1; @@ -21420,7 +21425,7 @@ fn analyzeShuffle( var i: usize = 0; while (i < mask_len) : (i += 1) { const elem = try mask.elemValue(sema.mod, i); - if (elem.isUndef()) continue; + if (elem.isUndef(mod)) continue; const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; var chosen: u32 = undefined; @@ -21458,7 +21463,7 @@ fn analyzeShuffle( i = 0; while (i < mask_len) : (i += 1) { const mask_elem_val = try mask.elemValue(sema.mod, i); - if (mask_elem_val.isUndef()) { + if (mask_elem_val.isUndef(mod)) { values[i] = Value.undef; continue; } @@ -21559,13 +21564,13 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const maybe_b = try sema.resolveMaybeUndefVal(b); const runtime_src = if (maybe_pred) |pred_val| rs: { - if (pred_val.isUndef()) return sema.addConstUndef(vec_ty); + if (pred_val.isUndef(mod)) return sema.addConstUndef(vec_ty); if (maybe_a) |a_val| { - if (a_val.isUndef()) return sema.addConstUndef(vec_ty); + if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty); if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); const elems = try sema.gpa.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { @@ -21587,16 +21592,16 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C } } else { if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } break :rs a_src; } } else rs: { if (maybe_a) |a_val| { - if (a_val.isUndef()) return sema.addConstUndef(vec_ty); + if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } break :rs pred_src; }; @@ -21803,10 +21808,10 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const runtime_src = if (maybe_mulend1) |mulend1_val| rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef()) return sema.addConstUndef(ty); + if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty); if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod); return sema.addConstant(ty, result_val); } else { @@ -21814,16 +21819,16 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} } else { if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); } break :rs mulend2_src; } } else rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef()) return sema.addConstUndef(ty); + if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty); } if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); } break :rs mulend1_src; }; @@ -21859,7 +21864,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstValue(block, modifier_src, modifier_ref, "call modifier must be comptime-known"); - var modifier = modifier_val.toEnum(std.builtin.CallModifier); + var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val); switch (modifier) { // These can be upgraded to comptime or nosuspend calls. .auto, .never_tail, .no_async => { @@ -22111,8 +22116,8 @@ fn analyzeMinMax( runtime_known.unset(operand_idx); - if (cur_val.isUndef()) continue; // result is also undef - if (operand_val.isUndef()) { + if (cur_val.isUndef(mod)) continue; // result is also undef + if (operand_val.isUndef(mod)) { cur_minmax = try sema.addConstUndef(simd_op.result_ty); continue; } @@ -22165,7 +22170,7 @@ fn analyzeMinMax( var cur_max: Value = cur_min; for (1..len) |idx| { const elem_val = try val.elemValue(mod, idx); - if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef + if (elem_val.isUndef(mod)) break :blk orig_ty; // can't refine undef if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val; if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val; } @@ -22177,7 +22182,7 @@ fn analyzeMinMax( }); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats - if (val.isUndef()) break :blk orig_ty; // can't refine undef + if (val.isUndef(mod)) break :blk orig_ty; // can't refine undef break :blk try mod.intFittingRange(val, val); }; @@ -22205,7 +22210,7 @@ fn analyzeMinMax( // If the comptime-known part is undef we can avoid emitting actual instructions later const known_undef = if (cur_minmax) |operand| blk: { const val = (try sema.resolveMaybeUndefVal(operand)).?; - break :blk val.isUndef(); + break :blk val.isUndef(mod); } else false; if (cur_minmax == null) { @@ -22749,7 +22754,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - break :blk val.toEnum(std.builtin.AddressSpace); + break :blk mod.toEnum(std.builtin.AddressSpace, val); } else if (extra.data.bits.has_addrspace_ref) blk: { const addrspace_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; @@ -22759,7 +22764,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - break :blk addrspace_tv.val.toEnum(std.builtin.AddressSpace); + break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); } else target_util.defaultAddressSpace(target, .function); const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: { @@ -22797,7 +22802,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - break :blk 
val.toEnum(std.builtin.CallingConvention); + break :blk mod.toEnum(std.builtin.CallingConvention, val); } else if (extra.data.bits.has_cc_ref) blk: { const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; @@ -22807,7 +22812,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - break :blk cc_tv.val.toEnum(std.builtin.CallingConvention); + break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val); } else if (sema.owner_decl.is_exported and has_body) .C else @@ -22994,9 +22999,9 @@ fn resolvePrefetchOptions( const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known"); return std.builtin.PrefetchOptions{ - .rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw), + .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), .locality = @intCast(u2, locality_val.toUnsignedInt(mod)), - .cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache), + .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -23059,7 +23064,7 @@ fn resolveExternOptions( const linkage_ref = try sema.fieldVal(block, src, options, "linkage", linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known"); - const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage); + const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); @@ -24140,7 +24145,7 @@ fn fieldVal( const field_index = @intCast(u32, field_index_usize); return sema.addConstant( enum_ty, - try Value.Tag.enum_field_index.create(sema.arena, field_index), + try mod.enumValueFieldIndex(enum_ty, field_index), ); } } @@ -24155,8 +24160,8 @@ fn fieldVal( const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index = @intCast(u32, field_index_usize); - const enum_val = try Value.Tag.enum_field_index.create(arena, field_index); - return sema.addConstant(try child_type.copy(arena), enum_val); + const enum_val = try mod.enumValueFieldIndex(child_type, field_index); + return sema.addConstant(child_type, enum_val); }, .Struct, .Opaque => { if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { @@ -24355,8 +24360,8 @@ fn fieldPtr( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try enum_ty.copy(anon_decl.arena()), - try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), + enum_ty, + try mod.enumValueFieldIndex(enum_ty, field_index_u32), 0, // default alignment )); } @@ -24376,8 +24381,8 @@ fn fieldPtr( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try child_type.copy(anon_decl.arena()), - try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), + child_type, + try mod.enumValueFieldIndex(child_type, field_index_u32), 0, // default alignment )); }, @@ -24850,7 +24855,7 @@ fn structFieldVal( } if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { - if (struct_val.isUndef()) return sema.addConstUndef(field.ty); + if (struct_val.isUndef(mod)) return 
sema.addConstUndef(field.ty); if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { return sema.addConstant(field.ty, opv); } @@ -24922,7 +24927,7 @@ fn tupleFieldValByIndex( } if (try sema.resolveMaybeUndefVal(tuple_byval)) |tuple_val| { - if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); + if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return sema.addConstant(field_ty, opv); } @@ -24983,19 +24988,15 @@ fn unionFieldPtr( .Auto => if (!initializing) { const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse break :ct; - if (union_val.isUndef()) { + if (union_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); } const tag_and_val = union_val.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = enum_field_index, - }; - const field_tag = Value.initPayload(&field_tag_buf.base); + const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); if (!tag_matches) { const msg = msg: { - const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; + const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); errdefer msg.destroy(sema.gpa); @@ -25021,7 +25022,7 @@ fn unionFieldPtr( if (!initializing and union_obj.layout == .Auto and block.wantSafety() and union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { - const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); // TODO would it be better if get_union_tag supported pointers to unions? 
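        // `mod.enumValueFieldIndex(tag_ty, i)` replaces the old
        // `Value.Tag.enum_field_index` arena payload throughout this patch.
        // A minimal sketch of the assumed helper (the real one lives on
        // Module), mirroring the interning done in `typeHasOnePossibleValue`
        // further below:
        //
        //     pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) !Value {
        //         const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
        //         const int = if (enum_type.values.len != 0)
        //             // Explicitly-valued enum: reuse the declared tag value.
        //             enum_type.values[field_index]
        //         else
        //             // Auto-numbered enum: the integer is the field index itself.
        //             try mod.intern(.{ .int = .{
        //                 .ty = enum_type.tag_ty,
        //                 .storage = .{ .u64 = field_index },
        //             } });
        //         return (try mod.intern(.{ .enum_tag = .{
        //             .ty = ty.ip_index,
        //             .int = int,
        //         } })).toValue();
        //     }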
const union_val = try block.addTyOp(.load, union_ty, union_ptr); @@ -25054,14 +25055,10 @@ fn unionFieldVal( const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { - if (union_val.isUndef()) return sema.addConstUndef(field.ty); + if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); const tag_and_val = union_val.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = enum_field_index, - }; - const field_tag = Value.initPayload(&field_tag_buf.base); + const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); switch (union_obj.layout) { .Auto => { @@ -25069,7 +25066,7 @@ fn unionFieldVal( return sema.addConstant(field.ty, tag_and_val.val); } else { const msg = msg: { - const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; + const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); errdefer msg.destroy(sema.gpa); @@ -25096,7 +25093,7 @@ fn unionFieldVal( if (union_obj.layout == .Auto and block.wantSafety() and union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { - const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); @@ -25364,7 +25361,7 @@ fn tupleField( } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { - if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); + if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); } @@ -25412,7 +25409,7 @@ fn elemValArray( } } if (maybe_undef_array_val) |array_val| { - if (array_val.isUndef()) { + if (array_val.isUndef(mod)) { return sema.addConstUndef(elem_ty); } if (maybe_index_val) |index_val| { @@ -25473,7 +25470,7 @@ fn elemPtrArray( const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset); if (maybe_undef_array_ptr_val) |array_ptr_val| { - if (array_ptr_val.isUndef()) { + if (array_ptr_val.isUndef(mod)) { return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { @@ -25580,7 +25577,7 @@ fn elemPtrSlice( const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset); if (maybe_undef_slice_val) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.addConstUndef(elem_ptr_ty); } const slice_len = slice_val.sliceLen(mod); @@ -25605,7 +25602,7 @@ fn elemPtrSlice( if (oob_safety and block.wantSafety()) { const len_inst = len: { if (maybe_undef_slice_val) |slice_val| - if (!slice_val.isUndef()) + if (!slice_val.isUndef(mod)) break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; @@ -25681,7 +25678,6 @@ fn coerceExtra( if (dest_ty.eql(inst_ty, mod)) return inst; - const arena = sema.arena; const maybe_inst_val = try 
sema.resolveMaybeUndefVal(inst); var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); @@ -26175,7 +26171,7 @@ fn coerceExtra( }; return sema.addConstant( dest_ty, - try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), + try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)), ); }, .Union => blk: { @@ -27858,8 +27854,9 @@ fn beginComptimePtrMutation( }, .Union => { const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); payload.* = .{ .data = .{ - .tag = try Value.Tag.enum_field_index.create(arena, field_index), + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), .val = Value.undef, } }; @@ -27934,11 +27931,10 @@ fn beginComptimePtrMutation( .@"union" => { // We need to set the active field of the union. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try Value.Tag.enum_field_index.create(arena, field_index); + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); return beginComptimePtrMutationInner( sema, @@ -28575,7 +28571,7 @@ fn coerceCompatiblePtrs( const mod = sema.mod; const inst_ty = sema.typeOf(inst); if (try sema.resolveMaybeUndefVal(inst)) |val| { - if (!val.isUndef() and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { + if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } // The comptime Value representation is compatible with both types. @@ -29426,7 +29422,7 @@ fn analyzeSlicePtr( const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); const result_ty = slice_ty.slicePtrFieldType(buf, mod); if (try sema.resolveMaybeUndefVal(slice)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_ty); + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); return sema.addConstant(result_ty, val.slicePtr()); } try sema.requireRuntimeBlock(block, slice_src, null); @@ -29439,8 +29435,9 @@ fn analyzeSliceLen( src: LazySrcLoc, slice_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(slice_inst)) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.addConstUndef(Type.usize); } return sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)); @@ -29459,7 +29456,7 @@ fn analyzeIsNull( const mod = sema.mod; const result_ty = Type.bool; if (try sema.resolveMaybeUndefVal(operand)) |opt_val| { - if (opt_val.isUndef()) { + if (opt_val.isUndef(mod)) { return sema.addConstUndef(result_ty); } const is_null = opt_val.isNull(mod); @@ -29588,7 +29585,7 @@ fn analyzeIsNonErrComptimeOnly( } if (maybe_operand_val) |err_union| { - if (err_union.isUndef()) { + if (err_union.isUndef(mod)) { return sema.addConstUndef(Type.bool); } if (err_union.getError() == null) { @@ -29768,7 +29765,7 @@ fn analyzeSlice( } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveMaybeUndefVal(ptr_or_slice)) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; @@ -29948,7 +29945,7 @@ fn analyzeSlice( return 
result; }; - if (!new_ptr_val.isUndef()) { + if (!new_ptr_val.isUndef(mod)) { return sema.addConstant(return_ty, new_ptr_val); } @@ -30069,19 +30066,19 @@ fn cmpNumeric( if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { // Compare ints: const vs. undefined (or vice versa) - if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef()) { + if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) { try sema.resolveLazyValue(lhs_val); if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } - } else if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef()) { + } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) { try sema.resolveLazyValue(rhs_val); if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return sema.addConstUndef(Type.bool); } if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { @@ -30097,7 +30094,7 @@ fn cmpNumeric( return Air.Inst.Ref.bool_false; } } else { - if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { + if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. var try sema.resolveLazyValue(lhs_val); if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { @@ -30108,7 +30105,7 @@ fn cmpNumeric( } } else { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { + if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. 
const try sema.resolveLazyValue(rhs_val); if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { @@ -30177,7 +30174,7 @@ fn cmpNumeric( var lhs_bits: usize = undefined; if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { try sema.resolveLazyValue(lhs_val); - if (lhs_val.isUndef()) + if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (lhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, @@ -30236,7 +30233,7 @@ fn cmpNumeric( var rhs_bits: usize = undefined; if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { try sema.resolveLazyValue(rhs_val); - if (rhs_val.isUndef()) + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (rhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, @@ -30441,7 +30438,7 @@ fn cmpVector( const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return sema.addConstUndef(result_ty); } const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty); @@ -30558,11 +30555,12 @@ fn unionToTag( un: Air.Inst.Ref, un_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| { return sema.addConstant(enum_ty, opv); } if (try sema.resolveMaybeUndefVal(un)) |un_val| { - return sema.addConstant(enum_ty, un_val.unionTag()); + return sema.addConstant(enum_ty, un_val.unionTag(mod)); } try sema.requireRuntimeBlock(block, un_src, null); return block.addTyOp(.get_union_tag, enum_ty, un); @@ -31718,6 +31716,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -31845,6 +31844,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .none => return ty, .u1_type, + .u5_type, .u8_type, .i8_type, .u16_type, @@ -31904,6 +31904,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, + .one_u5 => unreachable, + .four_u5 => unreachable, .negative_one => unreachable, .calling_convention_c => unreachable, .calling_convention_inline => unreachable, @@ -32720,7 +32722,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (explicit_enum_info) |tag_info| { - const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse { + const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse { const msg = msg: { const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, @@ -33186,19 +33188,30 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .opaque_type => null, .enum_type => |enum_type| switch (enum_type.tag_mode) { .nonexhaustive => { - if (enum_type.tag_ty != .comptime_int_type and - !(try sema.typeHasRuntimeBits(enum_type.tag_ty.toType()))) - { - return Value.enum_field_0; - } else { - return null; + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.ip_index, + .int = int_opv.ip_index, + } }); + return only.toValue(); } + + return null; }, 
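            // For exhaustive enums, a single field means a single possible
            // value: either the declared tag value, or, when the enum is
            // auto-numbered (no explicit `values`), an interned `enum_tag`
            // carrying integer 0.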
.auto, .explicit => switch (enum_type.names.len) { 0 => return Value.@"unreachable", 1 => { if (enum_type.values.len == 0) { - return Value.enum_field_0; // auto-numbered + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.ip_index, + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return only.toValue(); } else { return enum_type.values[0].toValue(); } @@ -33208,6 +33221,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -33397,8 +33411,9 @@ pub fn analyzeAddressSpace( zir_ref: Zir.Inst.Ref, ctx: AddressSpaceContext, ) !std.builtin.AddressSpace { + const mod = sema.mod; const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref, "addresspace must be comptime-known"); - const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace); + const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); const target = sema.mod.getTarget(); const arch = target.cpu.arch; @@ -33766,6 +33781,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -33921,9 +33937,9 @@ fn numberAddWrapScalar( rhs: Value, ty: Type, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; - const mod = sema.mod; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intAdd(lhs, rhs, ty); } @@ -33975,9 +33991,9 @@ fn numberSubWrapScalar( rhs: Value, ty: Type, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; - const mod = sema.mod; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intSub(lhs, rhs, ty); } @@ -34222,17 +34238,12 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { const mod = sema.mod; const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; assert(enum_type.tag_mode != .nonexhaustive); - if (enum_type.values.len == 0) { - // auto-numbered - return sema.intInRange(enum_type.tag_ty.toType(), int, enum_type.names.len); - } - // The `tagValueIndex` function call below relies on the type being the integer tag type. // `getCoerced` assumes the value will fit the new type. 
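    // Hence the fits-check below: reject integers the tag type cannot hold
    // before coercing and probing `tagValueIndex`.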
if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false; const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty); - return enum_type.tagValueIndex(mod.intern_pool, int_coerced) != null; + return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null; } fn intAddWithOverflow( diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 57ef662a9e21..a18f49b96ffe 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -197,9 +197,6 @@ pub fn print( }, .empty_array => return writer.writeAll(".{}"), .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => { - return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data, mod)}); - }, .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { const str_lit = val.castTag(.str_lit).?.data; @@ -255,7 +252,7 @@ pub fn print( const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic }; - if (elem_val.isUndef()) break :str; + if (elem_val.isUndef(mod)) break :str; buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } @@ -358,6 +355,20 @@ pub fn print( .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), }, + .enum_tag => |enum_tag| { + try writer.writeAll("@intToEnum("); + try print(.{ + .ty = Type.type, + .val = enum_tag.ty.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(", "); + try print(.{ + .ty = mod.intern_pool.typeOf(enum_tag.int).toType(), + .val = enum_tag.int.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(")"); + return; + }, .float => |float| switch (float.storage) { inline else => |x| return writer.print("{}", .{x}), }, @@ -414,7 +425,7 @@ fn printAggregate( var i: u32 = 0; while (i < max_len) : (i += 1) { const elem = try val.fieldValue(ty, mod, i); - if (elem.isUndef()) break :str; + if (elem.isUndef(mod)) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } diff --git a/src/Zir.zig b/src/Zir.zig index 34479cce5e81..136920d75d6a 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2052,6 +2052,7 @@ pub const Inst = struct { /// and `[]Ref`. 
pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), + u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -2120,6 +2121,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), + one_u5 = @enumToInt(InternPool.Index.one_u5), + four_u5 = @enumToInt(InternPool.Index.four_u5), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a2f4f81053ee..6ae516371409 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -11,6 +11,7 @@ const log = std.log.scoped(.codegen); const codegen = @import("../../codegen.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Decl = Module.Decl; const Type = @import("../../type.zig").Type; const Value = @import("../../value.zig").Value; @@ -3044,11 +3045,12 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( } fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; var val = arg_val; if (val.castTag(.runtime_value)) |rt| { val = rt.data; } - if (val.isUndefDeep()) return func.emitUndefined(ty); + if (val.isUndefDeep(mod)) return func.emitUndefined(ty); if (val.castTag(.decl_ref)) |decl_ref| { const decl_index = decl_ref.data; return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); @@ -3057,7 +3059,6 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const decl_index = decl_ref_mut.data.decl_index; return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); } - const mod = func.bin_file.base.options.module.?; switch (ty.zigTypeTag(mod)) { .Void => return WValue{ .none = {} }, .Int => { @@ -3100,18 +3101,9 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, }, .Enum => { - if (val.castTag(.enum_field_index)) |field_index| { - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - const tag_val = enum_type.values[field_index.data]; - return func.lowerConstant(tag_val.toValue(), enum_type.tag_ty.toType()); - } else { - return WValue{ .imm32 = field_index.data }; - } - } else { - const int_tag_ty = try ty.intTagType(mod); - return func.lowerConstant(val, int_tag_ty); - } + const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); }, .ErrorSet => switch (val.tag()) { .@"error" => { @@ -3223,37 +3215,42 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// Returns a `Value` as a signed 32 bit value. /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
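/// Interned values (bools, ints, enum tags, and integer pointer addresses)
/// are read directly out of the InternPool.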
-fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) !i32 { +fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { const mod = func.bin_file.base.options.module.?; - switch (ty.zigTypeTag(mod)) { - .Enum => { - if (val.castTag(.enum_field_index)) |field_index| { - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - const tag_val = enum_type.values[field_index.data]; - return func.valueAsI32(tag_val.toValue(), enum_type.tag_ty.toType()); - } else { - return @bitCast(i32, field_index.data); - } - } else { - const int_tag_ty = try ty.intTagType(mod); - return func.valueAsI32(val, int_tag_ty); - } - }, - .Int => switch (ty.intInfo(mod).signedness) { - .signed => return @truncate(i32, val.toSignedInt(mod)), - .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(mod))), + + switch (val.ip_index) { + .none => {}, + .bool_true => return 1, + .bool_false => return 0, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int), + .int => |int| intStorageAsI32(int.storage), + .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int), + else => unreachable, }, + } + + switch (ty.zigTypeTag(mod)) { .ErrorSet => { const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function return @bitCast(i32, kv.value); }, - .Bool => return @intCast(i32, val.toSignedInt(mod)), - .Pointer => return @intCast(i32, val.toSignedInt(mod)), else => unreachable, // Programmer called this function for an illegal type } } +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage); +} + +fn intStorageAsI32(storage: InternPool.Key.Int.Storage) i32 { + return switch (storage) { + .i64 => |x| @intCast(i32, x), + .u64 => |x| @bitCast(i32, @intCast(u32, x)), + .big_int => unreachable, + }; +} + fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; @@ -3772,7 +3769,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { for (items, 0..) |ref, i| { const item_val = (try func.air.value(ref, mod)).?; - const int_val = try func.valueAsI32(item_val, target_ty); + const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; } @@ -5071,12 +5068,8 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const tag_int = blk: { const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const enum_field_index = tag_ty.enumFieldIndex(field_name).?; - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, enum_field_index), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); + const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?; + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); break :blk try func.lowerConstant(tag_val, tag_ty); }; if (layout.payload_size == 0) { @@ -6815,7 +6808,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse. // generate an if-else chain for each tag value as well as constant. - for (enum_ty.enumFields(mod), 0..) 
|tag_name_ip, field_index| { + for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index_usize| { + const field_index = @intCast(u32, field_index_usize); const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); // for each tag name, create an unnamed const, // and then get a pointer to its value. @@ -6857,11 +6851,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.local_get)); try leb.writeULEB128(writer, @as(u32, 1)); - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - const tag_value = try func.lowerConstant(Value.initPayload(&tag_val_payload.base), enum_ty); + const tag_val = try mod.enumValueFieldIndex(enum_ty, field_index); + const tag_value = try func.lowerConstant(tag_val, enum_ty); switch (tag_value) { .imm32 => |value| { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 72f416ca8763..7e2e37667e18 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2029,13 +2029,10 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { exitlude_jump_relocs, enum_ty.enumFields(mod), 0.., - ) |*exitlude_jump_reloc, tag_name_ip, index| { + ) |*exitlude_jump_reloc, tag_name_ip, index_usize| { + const index = @intCast(u32, index_usize); const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); - var tag_pl = Value.Payload.U32{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv); const skip_reloc = try self.asmJccReloc(undefined, .ne); @@ -11415,8 +11412,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const field_name = union_obj.fields.keys()[extra.field_index]; const tag_ty = union_obj.tag_ty; const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); - var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; - const tag_val = Value.initPayload(&tag_pl.base); + const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); const tag_int_val = try tag_val.enumToInt(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) diff --git a/src/codegen.zig b/src/codegen.zig index 148a69016ac1..90b6bfccf2fc 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -196,7 +196,7 @@ pub fn generateSymbol( typed_value.val.fmtValue(typed_value.ty, mod), }); - if (typed_value.val.isUndefDeep()) { + if (typed_value.val.isUndefDeep(mod)) { const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); return Result.ok; @@ -1168,7 +1168,7 @@ pub fn genTypedValue( typed_value.val.fmtValue(typed_value.ty, mod), }); - if (typed_value.val.isUndef()) + if (typed_value.val.isUndef(mod)) return GenResult.mcv(.undef); const target = bin_file.options.target; @@ -1229,24 +1229,12 @@ pub fn genTypedValue( } }, .Enum => { - if (typed_value.val.castTag(.enum_field_index)) |field_index| { - const enum_type = mod.intern_pool.indexToKey(typed_value.ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - const tag_val = enum_type.values[field_index.data]; - return genTypedValue(bin_file, src_loc, .{ - .ty = 
enum_type.tag_ty.toType(), - .val = tag_val.toValue(), - }, owner_decl_index); - } else { - return GenResult.mcv(.{ .immediate = field_index.data }); - } - } else { - const int_tag_ty = try typed_value.ty.intTagType(mod); - return genTypedValue(bin_file, src_loc, .{ - .ty = int_tag_ty, - .val = typed_value.val, - }, owner_decl_index); - } + const enum_tag = mod.intern_pool.indexToKey(typed_value.val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return genTypedValue(bin_file, src_loc, .{ + .ty = int_tag_ty.toType(), + .val = enum_tag.int.toValue(), + }, owner_decl_index); }, .ErrorSet => { switch (typed_value.val.tag()) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c8e730354574..2ee7dab2fea5 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -748,7 +748,7 @@ pub const DeclGen = struct { .ReleaseFast, .ReleaseSmall => false, }; - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { switch (ty.zigTypeTag(mod)) { .Bool => { if (safety_on) { @@ -1183,7 +1183,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { @@ -1197,7 +1197,7 @@ pub const DeclGen = struct { while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try writer.print("'\\x{x}'", .{elem_val_u8}); } if (ai.sentinel) |s| { @@ -1284,23 +1284,16 @@ pub const DeclGen = struct { try dg.renderValue(writer, error_ty, error_val, initializer_type); try writer.writeAll(" }"); }, - .Enum => { - switch (val.tag()) { - .enum_field_index => { - const field_index = val.castTag(.enum_field_index).?.data; - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - const tag_val = enum_type.values[field_index]; - return dg.renderValue(writer, enum_type.tag_ty.toType(), tag_val.toValue(), location); - } else { - return writer.print("{d}", .{field_index}); - } - }, - else => { - const int_tag_ty = try ty.intTagType(mod); - return dg.renderValue(writer, int_tag_ty, val, location); - }, - } + .Enum => switch (val.ip_index) { + .none => { + const int_tag_ty = try ty.intTagType(mod); + return dg.renderValue(writer, int_tag_ty, val, location); + }, + else => { + const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); + }, }, .Fn => switch (val.tag()) { .function => { @@ -2524,13 +2517,10 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeByte('('); try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); try w.writeAll(") {\n switch (tag) {\n"); - for (enum_ty.enumFields(mod), 0..) |name_ip, index| { + for (enum_ty.enumFields(mod), 0..) 
|name_ip, index_usize| { + const index = @intCast(u32, index_usize); const name = mod.intern_pool.stringToSlice(name_ip); - var tag_pl: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const int_val = try tag_val.enumToInt(enum_ty, mod); @@ -3609,7 +3599,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep(mod) else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -4267,7 +4257,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep(mod) else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -6290,7 +6280,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(mod); - const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6907,11 +6897,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name, mod).?; - var tag_pl: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); const int_val = try tag_val.enumToInt(tag_ty, mod); @@ -7438,7 +7424,7 @@ fn formatIntLiteral( defer allocator.free(undef_limbs); var int_buf: Value.BigIntSpace = undefined; - const int = if (data.val.isUndefDeep()) blk: { + const int = if (data.val.isUndefDeep(mod)) blk: { undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits)); @memset(undef_limbs, undefPattern(BigIntLimb)); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index cc766c956227..e485b58c3569 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3233,16 +3233,16 @@ pub const DeclGen = struct { } fn lowerValue(dg: *DeclGen, arg_tv: TypedValue) Error!*llvm.Value { + const mod = dg.module; + const target = mod.getTarget(); var tv = arg_tv; if (tv.val.castTag(.runtime_value)) |rt| { tv.val = rt.data; } - if (tv.val.isUndef()) { + if (tv.val.isUndef(mod)) { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } - const mod = dg.module; - const target = mod.getTarget(); switch (tv.ty.zigTypeTag(mod)) { .Bool => { const llvm_type = try dg.lowerType(tv.ty); @@ -8204,7 +8204,7 @@ pub const FuncGen = struct { const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (try 
self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -8496,7 +8496,7 @@ pub const FuncGen = struct { const is_volatile = ptr_ty.isVolatilePtr(mod); if (try self.air.value(bin_op.rhs, mod)) |elem_val| { - if (elem_val.isUndefDeep()) { + if (elem_val.isUndefDeep(mod)) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. @@ -8890,15 +8890,12 @@ pub const FuncGen = struct { const tag_int_value = fn_val.getParam(0); const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len)); - for (enum_type.names, 0..) |_, field_index| { + for (enum_type.names, 0..) |_, field_index_usize| { + const field_index = @intCast(u32, field_index_usize); const this_tag_int_value = int: { - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; break :int try self.dg.lowerValue(.{ .ty = enum_ty, - .val = Value.initPayload(&tag_val_payload.base), + .val = try mod.enumValueFieldIndex(enum_ty, field_index), }); }; switch_instr.addCase(this_tag_int_value, named_block); @@ -8973,7 +8970,8 @@ pub const FuncGen = struct { usize_llvm_ty.constNull(), usize_llvm_ty.constNull(), }; - for (enum_type.names, 0..) |name_ip, field_index| { + for (enum_type.names, 0..) |name_ip, field_index_usize| { + const field_index = @intCast(u32, field_index_usize); const name = mod.intern_pool.stringToSlice(name_ip); const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False); const str_init_llvm_ty = str_init.typeOf(); @@ -8997,16 +8995,10 @@ pub const FuncGen = struct { slice_global.setAlignment(slice_alignment); const return_block = self.context.appendBasicBlock(fn_val, "Name"); - const this_tag_int_value = int: { - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - break :int try self.dg.lowerValue(.{ - .ty = enum_ty, - .val = Value.initPayload(&tag_val_payload.base), - }); - }; + const this_tag_int_value = try self.dg.lowerValue(.{ + .ty = enum_ty, + .val = try mod.enumValueFieldIndex(enum_ty, field_index), + }); switch_instr.addCase(this_tag_int_value, return_block); self.builder.positionBuilderAtEnd(return_block); @@ -9094,7 +9086,7 @@ pub const FuncGen = struct { for (values, 0..) 
|*val, i| { const elem = try mask.elemValue(mod, i); - if (elem.isUndef()) { + if (elem.isUndef(mod)) { val.* = llvm_i32.getUndef(); } else { const int = elem.toSignedInt(mod); @@ -9419,11 +9411,7 @@ pub const FuncGen = struct { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const union_field_name = union_obj.fields.keys()[extra.field_index]; const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?; - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, enum_field_index), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); const tag_int_val = try tag_val.enumToInt(tag_ty, mod); break :blk tag_int_val.toUnsignedInt(mod); }; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 1176eb746d93..a81e36fefabc 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -614,7 +614,7 @@ pub const DeclGen = struct { const dg = self.dg; const mod = dg.module; - if (val.isUndef()) { + if (val.isUndef(mod)) { const size = ty.abiSize(mod); return try self.addUndef(size); } @@ -882,7 +882,7 @@ pub const DeclGen = struct { // const target = self.getTarget(); // TODO: Fix the resulting global linking for these paths. - // if (val.isUndef()) { + // if (val.isUndef(mod)) { // // Special case: the entire value is undefined. In this case, we can just // // generate an OpVariable with no initializer. // return try section.emit(self.spv.gpa, .OpVariable, .{ @@ -978,7 +978,7 @@ pub const DeclGen = struct { log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) }); - if (val.isUndef()) { + if (val.isUndef(mod)) { return self.spv.constUndef(result_ty_ref); } @@ -2091,7 +2091,7 @@ pub const DeclGen = struct { var i: usize = 0; while (i < mask_len) : (i += 1) { const elem = try mask.elemValue(self.module, i); - if (elem.isUndef()) { + if (elem.isUndef(mod)) { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { const int = elem.toSignedInt(mod); diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 4e75cfff97a0..452356de2cac 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1304,7 +1304,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const index: u16 = blk: { - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { // TODO in release-fast and release-small, we should put undef in .bss break :blk self.data_section_index.?; } diff --git a/src/link/Elf.zig b/src/link/Elf.zig index c80d60d72a24..b27967884eb5 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2456,7 +2456,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const shdr_index: u16 = blk: { - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { // TODO in release-fast and release-small, we should put undef in .bss break :blk self.data_section_index.?; } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 06f79cf3fb56..e7723595dbe2 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -2270,7 +2270,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { const single_threaded = self.base.options.single_threaded; const sect_id: u8 = blk: { // TODO finish and audit this function - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { if (mode == .ReleaseFast or mode == .ReleaseSmall) { @panic("TODO 
__DATA,__bss"); } else { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index ddf5130fd291..ef97a7fa7f33 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -3374,7 +3374,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod } else if (decl.getVariable()) |variable| { if (!variable.is_mutable) { try wasm.parseAtom(atom_index, .{ .data = .read_only }); - } else if (variable.init.isUndefDeep()) { + } else if (variable.init.isUndefDeep(mod)) { // for safe build modes, we store the atom in the data segment, // whereas for unsafe build modes we store it in bss. const is_initialized = wasm.base.options.optimize_mode == .Debug or diff --git a/src/type.zig b/src/type.zig index 8358f3678def..d051191bfe30 100644 --- a/src/type.zig +++ b/src/type.zig @@ -126,6 +126,7 @@ pub const Type = struct { }, // values, not types + .undef => unreachable, .un => unreachable, .extern_func => unreachable, .int => unreachable, @@ -1350,6 +1351,7 @@ pub const Type = struct { }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -1600,6 +1602,7 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -1713,6 +1716,7 @@ pub const Type = struct { }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -2104,6 +2108,7 @@ pub const Type = struct { .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -2499,6 +2504,7 @@ pub const Type = struct { .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -2736,6 +2742,7 @@ pub const Type = struct { .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -3492,6 +3499,7 @@ pub const Type = struct { .opaque_type => unreachable, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -3826,19 +3834,30 @@ pub const Type = struct { .opaque_type => return null, .enum_type => |enum_type| switch (enum_type.tag_mode) { .nonexhaustive => { - if (enum_type.tag_ty != .comptime_int_type and - !enum_type.tag_ty.toType().hasRuntimeBits(mod)) - { - return Value.enum_field_0; - } else { - return null; + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try enum_type.tag_ty.toType().onePossibleValue(mod)) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.ip_index, + .int = int_opv.ip_index, + } }); + return only.toValue(); } + + return null; }, .auto, .explicit => switch (enum_type.names.len) { 0 => return Value.@"unreachable", 1 => { if (enum_type.values.len == 0) { - return Value.enum_field_0; // auto-numbered + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.ip_index, + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), 
+ } }); + return only.toValue(); } else { return enum_type.values[0].toValue(); } @@ -3848,6 +3867,7 @@ pub const Type = struct { }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -4006,6 +4026,7 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -4224,36 +4245,22 @@ pub const Type = struct { return ip.stringToSlice(field_name); } - pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?usize { + pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?u32 { const ip = &mod.intern_pool; const enum_type = ip.indexToKey(ty.ip_index).enum_type; // If the string is not interned, then the field certainly is not present. const field_name_interned = ip.getString(field_name).unwrap() orelse return null; - return enum_type.nameIndex(ip.*, field_name_interned); + return enum_type.nameIndex(ip, field_name_interned); } /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or /// an integer which represents the enum value. Returns the field index in /// declaration order, or `null` if `enum_tag` does not match any field. - pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize { - if (enum_tag.castTag(.enum_field_index)) |payload| { - return @as(usize, payload.data); - } + pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { const ip = &mod.intern_pool; const enum_type = ip.indexToKey(ty.ip_index).enum_type; - const tag_ty = enum_type.tag_ty.toType(); - if (enum_type.values.len == 0) { - if (enum_tag.compareAllWithZero(.lt, mod)) return null; - const end_val = mod.intValue(tag_ty, enum_type.names.len) catch |err| switch (err) { - // TODO: eliminate this failure condition - error.OutOfMemory => @panic("OOM"), - }; - if (enum_tag.compareScalar(.gte, end_val, tag_ty, mod)) return null; - return @intCast(usize, enum_tag.toUnsignedInt(mod)); - } else { - assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty); - return enum_type.tagValueIndex(ip.*, enum_tag.ip_index); - } + assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty); + return enum_type.tagValueIndex(ip, enum_tag.ip_index); } pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields { diff --git a/src/value.zig b/src/value.zig index bb3716d28ecb..84408424f048 100644 --- a/src/value.zig +++ b/src/value.zig @@ -73,8 +73,6 @@ pub const Value = struct { /// Pointer and length as sub `Value` objects. slice, enum_literal, - /// A specific enum tag, indicated by the field index (declaration order). - enum_field_index, @"error", /// When the type is error union: /// * If the tag is `.@"error"`, the error union is an error. 
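
With the `enum_field_index` payload tag deleted above, every site that used to
build an enum tag value on the stack (`Value.Payload.U32` plus
`Value.initPayload`) now asks the module for an interned one instead. As a
rough sketch of what the replacement helper has to produce: the shape mirrors
the `onePossibleValue` hunk in src/type.zig above, and the helper name and
exact signature here are assumptions of this note, not verified API.

    // Assumed context: a `mod: *Module`, an enum `ty: Type`, and a
    // declaration-order `field_index: u32`.
    pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) !Value {
        const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
        const int = if (enum_type.values.len != 0)
            // Explicitly valued enum: reuse the stored tag integer.
            enum_type.values[field_index]
        else
            // Auto-numbered enum: field index and tag integer coincide.
            (try mod.intValue(enum_type.tag_ty.toType(), field_index)).ip_index;
        const tag = try mod.intern(.{ .enum_tag = .{
            .ty = ty.ip_index,
            .int = int,
        } });
        return tag.toValue();
    }
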
@@ -143,8 +141,6 @@ pub const Value = struct { .str_lit => Payload.StrLit, .slice => Payload.Slice, - .enum_field_index => Payload.U32, - .ty, .lazy_align, .lazy_size, @@ -397,7 +393,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .enum_field_index => return self.copyPayloadShallow(arena, Payload.U32), .@"error" => return self.copyPayloadShallow(arena, Payload.Error), .aggregate => { @@ -515,7 +510,6 @@ pub const Value = struct { }, .empty_array => return out_stream.writeAll(".{}"), .enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => return out_stream.print("(enum field {d})", .{val.castTag(.enum_field_index).?.data}), .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { const str_lit = val.castTag(.str_lit).?.data; @@ -618,87 +612,58 @@ pub const Value = struct { }; } - /// Asserts the type is an enum type. - pub fn toEnum(val: Value, comptime E: type) E { + pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { + const ip = &mod.intern_pool; switch (val.ip_index) { - .calling_convention_c => { - if (E == std.builtin.CallingConvention) { - return .C; + .none => { + const field_index = switch (val.tag()) { + .the_only_possible_value => blk: { + assert(ty.enumFieldCount(mod) == 1); + break :blk 0; + }, + .enum_literal => i: { + const name = val.castTag(.enum_literal).?.data; + break :i ty.enumFieldIndex(name, mod).?; + }, + else => unreachable, + }; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + return enum_type.values[field_index].toValue(); } else { - unreachable; + // Field index and integer values are the same. + return mod.intValue(enum_type.tag_ty.toType(), field_index); } }, - .calling_convention_inline => { - if (E == std.builtin.CallingConvention) { - return .Inline; - } else { - unreachable; - } + else => { + const enum_type = ip.indexToKey(ip.typeOf(val.ip_index)).enum_type; + const int = try ip.getCoerced(mod.gpa, val.ip_index, enum_type.tag_ty); + return int.toValue(); }, - .none => switch (val.tag()) { - .enum_field_index => { - const field_index = val.castTag(.enum_field_index).?.data; - return @intToEnum(E, field_index); - }, - .the_only_possible_value => { - const fields = std.meta.fields(E); - assert(fields.len == 1); - return @intToEnum(E, fields[0].value); - }, - else => unreachable, - }, - else => unreachable, } } - pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { - const field_index = switch (val.tag()) { - .enum_field_index => val.castTag(.enum_field_index).?.data, - .the_only_possible_value => blk: { - assert(ty.enumFieldCount(mod) == 1); - break :blk 0; - }, - .enum_literal => i: { - const name = val.castTag(.enum_literal).?.data; - break :i ty.enumFieldIndex(name, mod).?; - }, - // Assume it is already an integer and return it directly. - else => return val, - }; + pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { + _ = ty; // TODO: remove this parameter now that we use InternPool - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - return enum_type.values[field_index].toValue(); - } else { - // Field index and integer values are the same. 
-        return mod.intValue(enum_type.tag_ty.toType(), field_index);
+        if (val.castTag(.enum_literal)) |payload| {
+            return payload.data;
         }
-    }
-
-    pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
-        if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(mod), mod);
-        const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+        const ip = &mod.intern_pool;
 
-        const field_index = switch (val.tag()) {
-            .enum_field_index => val.castTag(.enum_field_index).?.data,
-            .the_only_possible_value => blk: {
-                assert(ty.enumFieldCount(mod) == 1);
-                break :blk 0;
-            },
-            .enum_literal => return val.castTag(.enum_literal).?.data,
-            else => field_index: {
-                if (enum_type.values.len == 0) {
-                    // auto-numbered enum
-                    break :field_index @intCast(u32, val.toUnsignedInt(mod));
-                }
-                const field_index = enum_type.tagValueIndex(mod.intern_pool, val.ip_index).?;
-                break :field_index @intCast(u32, field_index);
-            },
-        };
-        const field_name = enum_type.names[field_index];
-        return mod.intern_pool.stringToSlice(field_name);
+        const enum_tag = switch (ip.indexToKey(val.ip_index)) {
+            .un => |un| ip.indexToKey(un.tag).enum_tag,
+            .enum_tag => |x| x,
+            else => unreachable,
+        };
+        const enum_type = ip.indexToKey(enum_tag.ty).enum_type;
+        const field_index = @intCast(u32, enum_type.tagValueIndex(ip, enum_tag.int).?);
+        const field_name = enum_type.names[field_index];
+        return ip.stringToSlice(field_name);
     }
 
     /// Asserts the value is an integer.
@@ -722,10 +687,6 @@
             .the_only_possible_value, // i0, u0
             => BigIntMutable.init(&space.limbs, 0).toConst(),
 
-            .enum_field_index => {
-                const index = val.castTag(.enum_field_index).?.data;
-                return BigIntMutable.init(&space.limbs, index).toConst();
-            },
             .runtime_value => {
                 const sub_val = val.castTag(.runtime_value).?.data;
                 return sub_val.toBigIntAdvanced(space, mod, opt_sema);
@@ -759,6 +720,7 @@
             },
             else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
                 .int => |int| int.storage.toBigInt(space),
+                .enum_tag => |enum_tag| mod.intern_pool.indexToKey(enum_tag.int).int.storage.toBigInt(space),
                 else => unreachable,
             },
         };
@@ -886,7 +848,7 @@
     }!void {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        if (val.isUndef()) {
+        if (val.isUndef(mod)) {
             const size = @intCast(usize, ty.abiSize(mod));
             @memset(buffer[0..size], 0xaa);
             return;
@@ -1007,7 +969,7 @@
     ) error{ ReinterpretDeclRef, OutOfMemory }!void {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        if (val.isUndef()) {
+        if (val.isUndef(mod)) {
             const bit_size = @intCast(usize, ty.bitSize(mod));
             std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
             return;
@@ -1087,7 +1049,7 @@
             .Auto => unreachable, // Sema is supposed to have emitted a compile error already
             .Extern => unreachable, // Handled in non-packed writeToMemory
             .Packed => {
-                const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
+                const field_index = ty.unionTagFieldIndex(val.unionTag(mod), mod);
                 const field_type = ty.unionFields(mod).values()[field_index.?].ty;
                 const field_val = try val.fieldValue(field_type, mod, field_index.?);
@@ -1432,7 +1394,7 @@
     }
 
     pub fn popCount(val: Value, ty: Type, mod: *Module) u64 {
-        assert(!val.isUndef());
+        assert(!val.isUndef(mod));
        switch (val.ip_index) {
            .bool_false => return 0,
            .bool_true => return 1,
@@ -1450,7 +1412,7 @@ pub 
const Value = struct { } pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef()); + assert(!val.isUndef(mod)); const info = ty.intInfo(mod); @@ -1468,7 +1430,7 @@ pub const Value = struct { } pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef()); + assert(!val.isUndef(mod)); const info = ty.intInfo(mod); @@ -1578,7 +1540,6 @@ pub const Value = struct { .variable, => .gt, - .enum_field_index => return std.math.order(lhs.castTag(.enum_field_index).?.data, 0), .runtime_value => { // This is needed to correctly handle hashing the value. // Checks in Sema should prevent direct comparisons from reaching here. @@ -1633,6 +1594,10 @@ pub const Value = struct { .big_int => |big_int| big_int.orderAgainstScalar(0), inline .u64, .i64 => |x| std.math.order(x, 0), }, + .enum_tag => |enum_tag| switch (mod.intern_pool.indexToKey(enum_tag.int).int.storage) { + .big_int => |big_int| big_int.orderAgainstScalar(0), + inline .u64, .i64 => |x| std.math.order(x, 0), + }, .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, @@ -1861,11 +1826,6 @@ pub const Value = struct { const b_name = b.castTag(.enum_literal).?.data; return std.mem.eql(u8, a_name, b_name); }, - .enum_field_index => { - const a_field_index = a.castTag(.enum_field_index).?.data; - const b_field_index = b.castTag(.enum_field_index).?.data; - return a_field_index == b_field_index; - }, .opt_payload => { const a_payload = a.castTag(.opt_payload).?.data; const b_payload = b.castTag(.opt_payload).?.data; @@ -2064,13 +2024,9 @@ pub const Value = struct { } const field_name = tuple.names[0]; const union_obj = mod.typeToUnion(ty).?; - const field_index = union_obj.fields.getIndex(field_name) orelse return false; + const field_index = @intCast(u32, union_obj.fields.getIndex(field_name) orelse return false); const tag_and_val = b.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - const field_tag = Value.initPayload(&field_tag_buf.base); + const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, field_index); const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); if (!tag_matches) return false; return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, opt_sema); @@ -2132,7 +2088,7 @@ pub const Value = struct { } const zig_ty_tag = ty.zigTypeTag(mod); std.hash.autoHash(hasher, zig_ty_tag); - if (val.isUndef()) return; + if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. if (val.isRuntimeValue()) return; @@ -2277,7 +2233,7 @@ pub const Value = struct { /// This function is used by hash maps and so treats floating-point NaNs as equal /// to each other, and not equal to other floating-point values. pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - if (val.isUndef()) return; + if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. 
if (val.isRuntimeValue()) return;
@@ -2726,16 +2682,12 @@ pub const Value = struct {
         }
     }
 
-    pub fn unionTag(val: Value) Value {
-        switch (val.ip_index) {
-            .undef => return val,
-            .none => switch (val.tag()) {
-                .enum_field_index => return val,
-                .@"union" => return val.castTag(.@"union").?.data.tag,
-                else => unreachable,
-            },
+    pub fn unionTag(val: Value, mod: *Module) Value {
+        return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+            .undef, .enum_tag => val,
+            .un => |un| un.tag.toValue(),
             else => unreachable,
-        }
+        };
     }
 
     /// Returns a pointer to the element value at the index.
@@ -2769,27 +2721,30 @@
         });
     }
 
-    pub fn isUndef(val: Value) bool {
-        return val.ip_index == .undef;
+    pub fn isUndef(val: Value, mod: *Module) bool {
+        if (val.ip_index == .none) return false;
+        return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+            .undef => true,
+            .simple_value => |v| v == .undefined,
+            else => false,
+        };
     }
 
     /// TODO: check for cases such as array that is not marked undef but all the element
     /// values are marked undef, or struct that is not marked undef but all fields are marked
     /// undef, etc.
-    pub fn isUndefDeep(val: Value) bool {
-        return val.isUndef();
+    pub fn isUndefDeep(val: Value, mod: *Module) bool {
+        return val.isUndef(mod);
     }
 
-    /// Returns true if any value contained in `self` is undefined.
-    /// TODO: check for cases such as array that is not marked undef but all the element
-    /// values are marked undef, or struct that is not marked undef but all fields are marked
-    /// undef, etc.
-    pub fn anyUndef(self: Value, mod: *Module) !bool {
-        switch (self.ip_index) {
+    /// Returns true if any value contained in `val` is undefined.
+    pub fn anyUndef(val: Value, mod: *Module) !bool {
+        switch (val.ip_index) {
             .undef => return true,
-            .none => switch (self.tag()) {
+            .none => switch (val.tag()) {
                 .slice => {
-                    const payload = self.castTag(.slice).?;
+                    const payload = val.castTag(.slice).?;
                     const len = payload.data.len.toUnsignedInt(mod);
                     for (0..len) |i| {
@@ -2799,14 +2754,21 @@
                 },
 
                 .aggregate => {
-                    const payload = self.castTag(.aggregate).?;
-                    for (payload.data) |val| {
-                        if (try val.anyUndef(mod)) return true;
+                    const payload = val.castTag(.aggregate).?;
+                    for (payload.data) |field| {
+                        if (try field.anyUndef(mod)) return true;
                     }
                 },
                 else => {},
             },
-            else => {},
+            else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .undef => return true,
+                .simple_value => |v| if (v == .undefined) return true,
+                .aggregate => |aggregate| for (aggregate.fields) |field| {
+                    if (try anyUndef(field.toValue(), mod)) return true;
+                },
+                else => {},
+            },
         }
 
        return false;
    }
@@ -2819,11 +2781,7 @@
             .undef => unreachable,
             .unreachable_value => unreachable,
 
-            .null_value,
-            .zero,
-            .zero_usize,
-            .zero_u8,
-            => true,
+            .null_value => true,
 
             .none => switch (val.tag()) {
                 .opt_payload => false,
@@ -2843,6 +2801,7 @@
                 .big_int => |big_int| big_int.eqZero(),
                 inline .u64, .i64 => |x| x == 0,
             },
+            .opt => |opt| opt.val == .none,
             else => unreachable,
         },
     };
@@ -3024,8 +2983,8 @@
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        assert(!lhs.isUndef());
-        assert(!rhs.isUndef());
+        assert(!lhs.isUndef(mod));
+        assert(!rhs.isUndef(mod));
 
         const info = ty.intInfo(mod);
@@ -3071,8 +3030,8 @@
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        assert(!lhs.isUndef());
-        assert(!rhs.isUndef());
+        assert(!lhs.isUndef(mod));
+        assert(!rhs.isUndef(mod));
 
        const info = ty.intInfo(mod);
@@ -3178,7 
+3137,7 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; if (ty.zigTypeTag(mod) == .ComptimeInt) { return intMul(lhs, rhs, ty, arena, mod); @@ -3220,8 +3179,8 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - assert(!lhs.isUndef()); - assert(!rhs.isUndef()); + assert(!lhs.isUndef(mod)); + assert(!rhs.isUndef(mod)); const info = ty.intInfo(mod); @@ -3249,7 +3208,7 @@ pub const Value = struct { /// Supports both floats and ints; handles undefined. pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { - if (lhs.isUndef() or rhs.isUndef()) return undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; if (lhs.isNan(mod)) return rhs; if (rhs.isNan(mod)) return lhs; @@ -3261,7 +3220,7 @@ pub const Value = struct { /// Supports both floats and ints; handles undefined. pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { - if (lhs.isUndef() or rhs.isUndef()) return undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; if (lhs.isNan(mod)) return rhs; if (rhs.isNan(mod)) return lhs; @@ -3286,7 +3245,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (val.isUndef()) return Value.undef; + if (val.isUndef(mod)) return Value.undef; const info = ty.intInfo(mod); @@ -3324,7 +3283,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3358,7 +3317,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty); @@ -3381,7 +3340,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3415,7 +3374,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
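
Taken together, the value.zig hunks above change the contract of the undef
helpers: `isUndef` is a shallow check that now needs the module, because an
undefined value can be interned either as its own `undef` key or as the
`undefined` simple value, while `anyUndef` also walks slice elements and
aggregate fields and can fail, hence the `!bool` and the `try`. A small usage
sketch, assuming a `val: Value` and a `mod: *Module` in scope:

    if (val.isUndef(mod)) {
        // the value itself is `undefined` (shallow check)
    } else if (try val.anyUndef(mod)) {
        // some element or field of the value is `undefined`
    }

`isUndefDeep` still just forwards to `isUndef`; the TODO retained above tracks
teaching it to recurse the way `anyUndef` does.
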
@@ -4697,11 +4656,6 @@ pub const Value = struct { pub const Payload = struct { tag: Tag, - pub const U32 = struct { - base: Payload, - data: u32, - }; - pub const Function = struct { base: Payload, data: *Module.Fn, @@ -4885,16 +4839,6 @@ pub const Value = struct { pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined }; pub const empty_struct: Value = .{ .ip_index = .empty_struct, .legacy = undefined }; - pub const enum_field_0: Value = .{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &enum_field_0_payload.base }, - }; - - var enum_field_0_payload: Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = 0, - }; - pub fn makeBool(x: bool) Value { return if (x) Value.true else Value.false; } From d18881de1be811c1dff52590223b92c916c4b773 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 14 May 2023 19:23:41 -0700 Subject: [PATCH 066/205] stage2: move anon tuples and anon structs to InternPool --- src/InternPool.zig | 203 ++++++++- src/Sema.zig | 715 +++++++++++++++++--------------- src/TypedValue.zig | 20 +- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen/c.zig | 56 ++- src/codegen/c/type.zig | 6 +- src/codegen/llvm.zig | 518 +++++++++++------------ src/codegen/spirv.zig | 11 +- src/link/Dwarf.zig | 22 +- src/type.zig | 806 +++++++++++------------------------- src/value.zig | 52 +-- 11 files changed, 1147 insertions(+), 1264 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index eace006d4cbb..74cc452176f8 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -137,9 +137,14 @@ pub const Key = union(enum) { payload_type: Index, }, simple_type: SimpleType, - /// If `empty_struct_type` is handled separately, then this value may be - /// safely assumed to never be `none`. + /// This represents a struct that has been explicitly declared in source code, + /// or was created with `@Type`. It is unique and based on a declaration. + /// It may be a tuple, if declared like this: `struct {A, B, C}`. struct_type: StructType, + /// This is an anonymous struct or tuple type which has no corresponding + /// declaration. It is used for types that have no `struct` keyword in the + /// source code, and were not created via `@Type`. + anon_struct_type: AnonStructType, union_type: UnionType, opaque_type: OpaqueType, enum_type: EnumType, @@ -168,7 +173,7 @@ pub const Key = union(enum) { /// Each element/field stored as an `Index`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, /// so the slice length will be one more than the type's array length. - aggregate: Aggregate, + aggregate: Key.Aggregate, /// An instance of a union. un: Union, @@ -222,22 +227,25 @@ pub const Key = union(enum) { namespace: Module.Namespace.Index, }; - /// There are three possibilities here: - /// * `@TypeOf(.{})` (untyped empty struct literal) - /// - namespace == .none, index == .none - /// * A struct which has a namepace, but no fields. - /// - index == .none - /// * A struct which has fields as well as a namepace. pub const StructType = struct { - /// The `none` tag is used to represent two cases: - /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`. - /// * A struct with no fields, in which case `namespace` will be populated. + /// The `none` tag is used to represent a struct with no fields. index: Module.Struct.OptionalIndex, - /// This will be `none` only in the case of `@TypeOf(.{})` - /// (`Index.empty_struct_type`). + /// May be `none` if the struct has no declarations. 
namespace: Module.Namespace.OptionalIndex, }; + pub const AnonStructType = struct { + types: []const Index, + /// This may be empty, indicating this is a tuple. + names: []const NullTerminatedString, + /// These elements may be `none`, indicating runtime-known. + values: []const Index, + + pub fn isTuple(self: AnonStructType) bool { + return self.names.len == 0; + } + }; + pub const UnionType = struct { index: Module.Union.Index, runtime_tag: RuntimeTag, @@ -498,6 +506,12 @@ pub const Key = union(enum) { std.hash.autoHash(hasher, aggregate.ty); for (aggregate.fields) |field| std.hash.autoHash(hasher, field); }, + + .anon_struct_type => |anon_struct_type| { + for (anon_struct_type.types) |elem| std.hash.autoHash(hasher, elem); + for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem); + for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem); + }, } } @@ -650,6 +664,12 @@ pub const Key = union(enum) { if (a_info.ty != b_info.ty) return false; return std.mem.eql(Index, a_info.fields, b_info.fields); }, + .anon_struct_type => |a_info| { + const b_info = b.anon_struct_type; + return std.mem.eql(Index, a_info.types, b_info.types) and + std.mem.eql(Index, a_info.values, b_info.values) and + std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + }, } } @@ -666,6 +686,7 @@ pub const Key = union(enum) { .union_type, .opaque_type, .enum_type, + .anon_struct_type, => .type_type, inline .ptr, @@ -1020,9 +1041,10 @@ pub const static_keys = [_]Key{ .{ .simple_type = .var_args_param }, // empty_struct_type - .{ .struct_type = .{ - .namespace = .none, - .index = .none, + .{ .anon_struct_type = .{ + .types = &.{}, + .names = &.{}, + .values = &.{}, } }, .{ .simple_value = .undefined }, @@ -1144,6 +1166,12 @@ pub const Tag = enum(u8) { /// Module.Struct object allocated for it. /// data is Module.Namespace.Index. type_struct_ns, + /// An AnonStructType which stores types, names, and values for each field. + /// data is extra index of `TypeStructAnon`. + type_struct_anon, + /// An AnonStructType which has only types and values for each field. + /// data is extra index of `TypeStructAnon`. + type_tuple_anon, /// A tagged union type. /// `data` is `Module.Union.Index`. type_union_tagged, @@ -1249,6 +1277,26 @@ pub const Tag = enum(u8) { only_possible_value, /// data is extra index to Key.Union. union_value, + /// An instance of a struct, array, or vector. + /// data is extra index to `Aggregate`. + aggregate, +}; + +/// Trailing: +/// 0. element: Index for each len +/// len is determined by the aggregate type. +pub const Aggregate = struct { + /// The type of the aggregate. + ty: Index, +}; + +/// Trailing: +/// 0. type: Index for each fields_len +/// 1. value: Index for each fields_len +/// 2. name: NullTerminatedString for each fields_len +/// The set of field names is omitted when the `Tag` is `type_tuple_anon`. 
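+/// For example, the anonymous tuple type interned by Sema's `analyzeTupleCat`
+/// (see the src/Sema.zig hunks below) is stored with the `type_tuple_anon`
+/// tag as `fields_len`, then one type `Index` per field, then one value
+/// `Index` per field, with `.none` marking a field whose value is only
+/// runtime-known.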
+pub const TypeStructAnon = struct { + fields_len: u32, }; /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to @@ -1572,6 +1620,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { } pub fn indexToKey(ip: InternPool, index: Index) Key { + assert(index != .none); const item = ip.items.get(@enumToInt(index)); const data = item.data; return switch (item.tag) { @@ -1659,6 +1708,30 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(), } }, + .type_struct_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data); + const fields_len = type_struct_anon.data.fields_len; + const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len]; + return .{ .anon_struct_type = .{ + .types = @ptrCast([]const Index, types), + .values = @ptrCast([]const Index, values), + .names = @ptrCast([]const NullTerminatedString, names), + } }; + }, + .type_tuple_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data); + const fields_len = type_struct_anon.data.fields_len; + const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + return .{ .anon_struct_type = .{ + .types = @ptrCast([]const Index, types), + .values = @ptrCast([]const Index, values), + .names = &.{}, + } }; + }, + .type_union_untagged => .{ .union_type = .{ .index = @intToEnum(Module.Union.Index, data), .runtime_tag = .none, @@ -1797,6 +1870,15 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { else => unreachable, }; }, + .aggregate => { + const extra = ip.extraDataTrail(Aggregate, data); + const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty)); + const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]); + return .{ .aggregate = .{ + .ty = extra.data.ty, + .fields = fields, + } }; + }, .union_value => .{ .un = ip.extraData(Key.Union, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, }; @@ -1982,6 +2064,45 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, + .anon_struct_type => |anon_struct_type| { + assert(anon_struct_type.types.len == anon_struct_type.values.len); + for (anon_struct_type.types) |elem| assert(elem != .none); + + const fields_len = @intCast(u32, anon_struct_type.types.len); + if (anon_struct_type.names.len == 0) { + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 2), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .type_tuple_anon, + .data = ip.addExtraAssumeCapacity(TypeStructAnon{ + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); + return @intToEnum(Index, ip.items.len - 1); + } + + assert(anon_struct_type.names.len == anon_struct_type.types.len); + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .type_struct_anon, + .data = ip.addExtraAssumeCapacity(TypeStructAnon{ + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); + 
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names)); + return @intToEnum(Index, ip.items.len - 1); + }, + .union_type => |union_type| { ip.items.appendAssumeCapacity(.{ .tag = switch (union_type.runtime_tag) { @@ -2269,6 +2390,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .aggregate => |aggregate| { + assert(aggregate.ty != .none); + for (aggregate.fields) |elem| assert(elem != .none); + if (aggregate.fields.len != ip.aggregateTypeLen(aggregate.ty)) { + std.debug.print("aggregate fields len = {d}, type len = {d}\n", .{ + aggregate.fields.len, + ip.aggregateTypeLen(aggregate.ty), + }); + } + assert(aggregate.fields.len == ip.aggregateTypeLen(aggregate.ty)); + if (aggregate.fields.len == 0) { ip.items.appendAssumeCapacity(.{ .tag = .only_possible_value, @@ -2276,7 +2407,19 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); return @intToEnum(Index, ip.items.len - 1); } - @panic("TODO"); + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Aggregate).Struct.fields.len + aggregate.fields.len, + ); + + ip.items.appendAssumeCapacity(.{ + .tag = .aggregate, + .data = ip.addExtraAssumeCapacity(Aggregate{ + .ty = aggregate.ty, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.fields)); }, .un => |un| { @@ -2913,6 +3056,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_opaque => @sizeOf(Key.OpaqueType), .type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), .type_struct_ns => @sizeOf(Module.Namespace), + .type_struct_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); + }, + .type_tuple_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); + }, .type_union_tagged, .type_union_untagged, @@ -2942,6 +3093,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { }, .enum_tag => @sizeOf(Key.EnumTag), + .aggregate => b: { + const info = ip.extraData(Aggregate, data); + const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty)); + break :b @sizeOf(Aggregate) + (@sizeOf(u32) * fields_len); + }, + .float_f16 => 0, .float_f32 => 0, .float_f64 => @sizeOf(Float64), @@ -3079,3 +3236,13 @@ pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E { const int = ip.indexToKey(i).enum_tag.int; return @intToEnum(E, ip.indexToKey(int).int.storage.u64); } + +pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { + return switch (ip.indexToKey(ty)) { + .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .array_type => |array_type| array_type.len, + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }; +} diff --git a/src/Sema.zig b/src/Sema.zig index 2fc364ebd78f..31e07bdcdca0 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7896,12 +7896,15 @@ fn resolveGenericInstantiationType( } fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - if (!ty.isSimpleTupleOrAnonStruct()) return; - const tuple = ty.tupleFields(); - for (tuple.values, 0..) 
|field_val, i| { - try sema.resolveTupleLazyValues(block, src, tuple.types[i]); - if (field_val.ip_index == .unreachable_value) continue; - try sema.resolveLazyValue(field_val); + const mod = sema.mod; + const tuple = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |tuple| tuple, + else => return, + }; + for (tuple.types, tuple.values) |field_ty, field_val| { + try sema.resolveTupleLazyValues(block, src, field_ty.toType()); + if (field_val == .none) continue; + try sema.resolveLazyValue(field_val.toValue()); } } @@ -12038,31 +12041,49 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs); const field_name = try sema.resolveConstString(block, name_src, extra.rhs, "field name must be comptime-known"); const ty = try sema.resolveTypeFields(unresolved_ty); + const ip = &mod.intern_pool; const has_field = hf: { - if (ty.isSlice(mod)) { - if (mem.eql(u8, field_name, "ptr")) break :hf true; - if (mem.eql(u8, field_name, "len")) break :hf true; - break :hf false; - } - if (ty.castTag(.anon_struct)) |pl| { - break :hf for (pl.data.names) |name| { - if (mem.eql(u8, name, field_name)) break true; - } else false; - } - if (ty.isTuple(mod)) { - const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; - break :hf field_index < ty.structFieldCount(mod); - } - break :hf switch (ty.zigTypeTag(mod)) { - .Struct => ty.structFields(mod).contains(field_name), - .Union => ty.unionFields(mod).contains(field_name), - .Enum => ty.enumFieldIndex(field_name, mod) != null, - .Array => mem.eql(u8, field_name, "len"), - else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ - ty.fmt(sema.mod), - }), - }; + switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => { + if (mem.eql(u8, field_name, "ptr")) break :hf true; + if (mem.eql(u8, field_name, "len")) break :hf true; + break :hf false; + }, + else => {}, + }, + .anon_struct_type => |anon_struct| { + if (anon_struct.names.len != 0) { + // If the string is not interned, then the field certainly is not present. + const name_interned = ip.getString(field_name).unwrap() orelse break :hf false; + break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, name_interned) != null; + } else { + const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; + break :hf field_index < ty.structFieldCount(mod); + } + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false; + assert(struct_obj.haveFieldTypes()); + break :hf struct_obj.fields.contains(field_name); + }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + assert(union_obj.haveFieldTypes()); + break :hf union_obj.fields.contains(field_name); + }, + .enum_type => |enum_type| { + // If the string is not interned, then the field certainly is not present. 
+ const name_interned = ip.getString(field_name).unwrap() orelse break :hf false; + break :hf enum_type.nameIndex(ip, name_interned) != null; + }, + .array_type => break :hf mem.eql(u8, field_name, "len"), + else => {}, + } + return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ + ty.fmt(sema.mod), + }); }; if (has_field) { return Air.Inst.Ref.bool_true; @@ -12632,42 +12653,48 @@ fn analyzeTupleCat( } const final_len = try sema.usizeCast(block, rhs_src, dest_fields); - const types = try sema.arena.alloc(Type, final_len); - const values = try sema.arena.alloc(Value, final_len); + const types = try sema.arena.alloc(InternPool.Index, final_len); + const values = try sema.arena.alloc(InternPool.Index, final_len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i, mod); + types[i] = lhs_ty.structFieldType(i, mod).ip_index; const default_val = lhs_ty.structFieldDefaultValue(i, mod); - values[i] = default_val; + values[i] = default_val.ip_index; const operand_src = lhs_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { runtime_src = operand_src; + values[i] = .none; } } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i, mod); + types[i + lhs_len] = rhs_ty.structFieldType(i, mod).ip_index; const default_val = rhs_ty.structFieldDefaultValue(i, mod); - values[i + lhs_len] = default_val; + values[i + lhs_len] = default_val.ip_index; const operand_src = rhs_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { runtime_src = operand_src; + values[i + lhs_len] = .none; } } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .fields = values, + } }); + return sema.addConstant(tuple_ty.toType(), tuple_val.toValue()); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -12685,7 +12712,7 @@ fn analyzeTupleCat( try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -12938,7 +12965,7 @@ fn analyzeTupleMul( block: *Block, src_node: i32, operand: Air.Inst.Ref, - factor: u64, + factor: usize, ) CompileError!Air.Inst.Ref { const mod = sema.mod; const operand_ty = sema.typeOf(operand); @@ -12947,44 +12974,45 @@ fn analyzeTupleMul( const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node }; const tuple_len = operand_ty.structFieldCount(mod); - const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch + const final_len = std.math.mul(usize, tuple_len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); - if (final_len_u64 == 0) { + if (final_len == 0) { return sema.addConstant(Type.empty_struct_literal, Value.empty_struct); } - const final_len = try sema.usizeCast(block, rhs_src, final_len_u64); - - const types = try sema.arena.alloc(Type, final_len); - const values = try sema.arena.alloc(Value, final_len); + 
const types = try sema.arena.alloc(InternPool.Index, final_len); + const values = try sema.arena.alloc(InternPool.Index, final_len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; - var i: u32 = 0; - while (i < tuple_len) : (i += 1) { - types[i] = operand_ty.structFieldType(i, mod); - values[i] = operand_ty.structFieldDefaultValue(i, mod); + for (0..tuple_len) |i| { + types[i] = operand_ty.structFieldType(i, mod).ip_index; + values[i] = operand_ty.structFieldDefaultValue(i, mod).ip_index; const operand_src = lhs_src; // TODO better source location - if (values[i].ip_index == .unreachable_value) { + if (values[i] == .unreachable_value) { runtime_src = operand_src; + values[i] = .none; // TODO don't treat unreachable_value as special } } - i = 0; - while (i < factor) : (i += 1) { - mem.copyForwards(Type, types[tuple_len * i ..], types[0..tuple_len]); - mem.copyForwards(Value, values[tuple_len * i ..], values[0..tuple_len]); + for (0..factor) |i| { + mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]); + mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]); } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .fields = values, + } }); + return sema.addConstant(tuple_ty.toType(), tuple_val.toValue()); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -13000,7 +13028,7 @@ fn analyzeTupleMul( @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -13020,7 +13048,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_ty.isTuple(mod)) { // In `**` rhs must be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known"); - return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor); + const factor_casted = try sema.usizeCast(block, rhs_src, factor); + return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted); } // Analyze the lhs first, to catch the case that someone tried to do exponentiation @@ -14533,19 +14562,14 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { .child = .u1_type, }) else Type.u1; - const types = try sema.arena.alloc(Type, 2); - const values = try sema.arena.alloc(Value, 2); - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ - .types = types, - .values = values, - }); - - types[0] = ty; - types[1] = ov_ty; - values[0] = Value.@"unreachable"; - values[1] = Value.@"unreachable"; - - return tuple_ty; + const types = [2]InternPool.Index{ ty.ip_index, ov_ty.ip_index }; + const values = [2]InternPool.Index{ .none, .none }; + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ + .types = &types, + .values = &values, + .names = &.{}, + } }); + return tuple_ty.toType(); } fn analyzeArithmetic( @@ -16506,57 +16530,66 @@ fn zirTypeInfo(sema: *Sema, block: 
*Block, inst: Zir.Inst.Index) CompileError!Ai const layout = struct_ty.containerLayout(mod); const struct_field_vals = fv: { - if (struct_ty.isSimpleTupleOrAnonStruct()) { - const tuple = struct_ty.tupleFields(); - const field_types = tuple.types; - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, field_types.len); - for (struct_field_vals, 0..) |*struct_field_val, i| { - const field_ty = field_types[i]; - const name_val = v: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - const bytes = if (struct_ty.castTag(.anon_struct)) |payload| - try anon_decl.arena().dupeZ(u8, payload.data.names[i]) - else - try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); - const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), - 0, // default alignment - ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); - }; - - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); - const field_val = tuple.values[i]; - const is_comptime = field_val.ip_index != .unreachable_value; - const opt_default_val = if (is_comptime) field_val else null; - const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val); - struct_field_fields.* = .{ - // name: []const u8, - name_val, - // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field_ty), - // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), - // is_comptime: bool, - Value.makeBool(is_comptime), - // alignment: comptime_int, - try field_ty.lazyAbiAlignment(mod, fields_anon_decl.arena()), - }; - struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); - } - break :fv struct_field_vals; - } - const struct_fields = struct_ty.structFields(mod); - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count()); + const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .anon_struct_type => |tuple| { + const struct_field_vals = try fields_anon_decl.arena().alloc(Value, tuple.types.len); + for ( + tuple.types, + tuple.values, + struct_field_vals, + 0.., + ) |field_ty, field_val, *struct_field_val, i| { + const name_val = v: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + const bytes = if (tuple.names.len != 0) + // https://github.com/ziglang/zig/issues/15709 + @as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i])) + else + try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); + const new_decl = try anon_decl.finish( + try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), + try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + 0, // default alignment + ); + break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ + .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), + .len = try mod.intValue(Type.usize, bytes.len), + }); + }; - for (struct_field_vals, 0..) 
|*field_val, i| { - const field = struct_fields.values()[i]; - const name = struct_fields.keys()[i]; + const struct_field_fields = try fields_anon_decl.arena().create([5]Value); + const is_comptime = field_val != .none; + const opt_default_val = if (is_comptime) field_val.toValue() else null; + const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val); + struct_field_fields.* = .{ + // name: []const u8, + name_val, + // type: type, + field_ty.toValue(), + // default_value: ?*const anyopaque, + try default_val_ptr.copy(fields_anon_decl.arena()), + // is_comptime: bool, + Value.makeBool(is_comptime), + // alignment: comptime_int, + try field_ty.toType().lazyAbiAlignment(mod, fields_anon_decl.arena()), + }; + struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + } + break :fv struct_field_vals; + }, + .struct_type => |s| s, + else => unreachable, + }; + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + break :fv &[0]Value{}; + const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_obj.fields.count()); + + for ( + struct_field_vals, + struct_obj.fields.keys(), + struct_obj.fields.values(), + ) |*field_val, name, field| { const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -18013,7 +18046,7 @@ fn zirStructInit( try sema.requireRuntimeBlock(block, src, null); try sema.queueFullTypeResolution(resolved_ty); return block.addUnionInit(resolved_ty, field_index, init_inst); - } else if (resolved_ty.isAnonStruct()) { + } else if (resolved_ty.isAnonStruct(mod)) { return sema.fail(block, src, "TODO anon struct init validation", .{}); } unreachable; @@ -18034,60 +18067,54 @@ fn finishStructInit( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - if (struct_ty.isAnonStruct()) { - const struct_obj = struct_ty.castTag(.anon_struct).?.data; - for (struct_obj.values, 0..) |default_val, i| { - if (field_inits[i] != .none) continue; - - if (default_val.ip_index == .unreachable_value) { - const field_name = struct_obj.names[i]; - const template = "missing struct field: {s}"; - const args = .{field_name}; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, args); - } else { - root_msg = try sema.errMsg(block, init_src, template, args); - } - } else { - field_inits[i] = try sema.addConstant(struct_obj.types[i], default_val); - } - } - } else if (struct_ty.isTuple(mod)) { - var i: u32 = 0; - const len = struct_ty.structFieldCount(mod); - while (i < len) : (i += 1) { - if (field_inits[i] != .none) continue; + switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .anon_struct_type => |anon_struct| { + for (anon_struct.types, anon_struct.values, 0..) 
|field_ty, default_val, i| { + if (field_inits[i] != .none) continue; - const default_val = struct_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { - const template = "missing tuple field with index {d}"; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, .{i}); + if (default_val == .none) { + if (anon_struct.names.len == 0) { + const template = "missing tuple field with index {d}"; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, .{i}); + } else { + root_msg = try sema.errMsg(block, init_src, template, .{i}); + } + } else { + const field_name = mod.intern_pool.stringToSlice(anon_struct.names[i]); + const template = "missing struct field: {s}"; + const args = .{field_name}; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, args); + } else { + root_msg = try sema.errMsg(block, init_src, template, args); + } + } } else { - root_msg = try sema.errMsg(block, init_src, template, .{i}); + field_inits[i] = try sema.addConstant(field_ty.toType(), default_val.toValue()); } - } else { - field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i, mod), default_val); } - } - } else { - const struct_obj = mod.typeToStruct(struct_ty).?; - for (struct_obj.fields.values(), 0..) |field, i| { - if (field_inits[i] != .none) continue; + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + for (struct_obj.fields.values(), 0..) |field, i| { + if (field_inits[i] != .none) continue; - if (field.default_val.ip_index == .unreachable_value) { - const field_name = struct_obj.fields.keys()[i]; - const template = "missing struct field: {s}"; - const args = .{field_name}; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, args); + if (field.default_val.ip_index == .unreachable_value) { + const field_name = struct_obj.fields.keys()[i]; + const template = "missing struct field: {s}"; + const args = .{field_name}; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, args); + } else { + root_msg = try sema.errMsg(block, init_src, template, args); + } } else { - root_msg = try sema.errMsg(block, init_src, template, args); + field_inits[i] = try sema.addConstant(field.ty, field.default_val); } - } else { - field_inits[i] = try sema.addConstant(field.ty, field.default_val); } - } + }, + else => unreachable, } if (root_msg) |msg| { @@ -18159,31 +18186,33 @@ fn zirStructInitAnon( is_ref: bool, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); - const types = try sema.arena.alloc(Type, extra.data.fields_len); - const values = try sema.arena.alloc(Value, types.len); - var fields = std.StringArrayHashMapUnmanaged(u32){}; - defer fields.deinit(sema.gpa); - try fields.ensureUnusedCapacity(sema.gpa, types.len); + const types = try sema.arena.alloc(InternPool.Index, extra.data.fields_len); + const values = try sema.arena.alloc(InternPool.Index, types.len); + var fields = std.AutoArrayHashMap(InternPool.NullTerminatedString, u32).init(sema.arena); + try fields.ensureUnusedCapacity(types.len); // Find which field forces the expression to be runtime, if any. const opt_runtime_index = rs: { var runtime_index: ?usize = null; var extra_index = extra.end; - for (types, 0..) |*field_ty, i| { + for (types, 0..) 
|*field_ty, i_usize| { + const i = @intCast(u32, i_usize); const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; const name = sema.code.nullTerminatedString(item.data.field_name); - const gop = fields.getOrPutAssumeCapacity(name); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + const gop = fields.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*); try sema.errNote(block, prev_source, msg, "other field here", .{}); @@ -18191,41 +18220,44 @@ fn zirStructInitAnon( }; return sema.failWithOwnedErrorMsg(msg); } - gop.value_ptr.* = @intCast(u32, i); + gop.value_ptr.* = i; const init = try sema.resolveInst(item.data.init); - field_ty.* = sema.typeOf(init); - if (types[i].zigTypeTag(mod) == .Opaque) { + field_ty.* = sema.typeOf(init).ip_index; + if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i]); + try sema.addDeclaredHereNote(msg, types[i].toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(init)) |init_val| { - values[i] = init_val; + values[i] = init_val.ip_index; } else { - values[i] = Value.@"unreachable"; + values[i] = .none; runtime_index = i; } } break :rs runtime_index; }; - const tuple_ty = try Type.Tag.anon_struct.create(sema.arena, .{ - .names = try sema.arena.dupe([]const u8, fields.keys()), + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ + .names = fields.keys(), .types = types, .values = values, - }); + } }); const runtime_index = opt_runtime_index orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .fields = values, + } }); + return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { @@ -18241,7 +18273,7 @@ fn zirStructInitAnon( if (is_ref) { const target = sema.mod.getTarget(); const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = tuple_ty, + .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -18254,9 +18286,9 @@ fn zirStructInitAnon( const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = field_ty, + .pointee_type = field_ty.toType(), }); - if (values[i].ip_index == .unreachable_value) { + if (values[i] == .none) { const init = try sema.resolveInst(item.data.init); const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, init); @@ -18274,7 +18306,7 @@ fn zirStructInitAnon( element_refs[i] = try 
sema.resolveInst(item.data.init); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayInit( @@ -18400,43 +18432,47 @@ fn zirArrayInitAnon( const operands = sema.code.refSlice(extra.end, extra.data.operands_len); const mod = sema.mod; - const types = try sema.arena.alloc(Type, operands.len); - const values = try sema.arena.alloc(Value, operands.len); + const types = try sema.arena.alloc(InternPool.Index, operands.len); + const values = try sema.arena.alloc(InternPool.Index, operands.len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (operands, 0..) |operand, i| { const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); - types[i] = sema.typeOf(elem); - if (types[i].zigTypeTag(mod) == .Opaque) { + types[i] = sema.typeOf(elem).ip_index; + if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i]); + try sema.addDeclaredHereNote(msg, types[i].toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(elem)) |val| { - values[i] = val; + values[i] = val.ip_index; } else { - values[i] = Value.@"unreachable"; + values[i] = .none; runtime_src = operand_src; } } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .fields = values, + } }); + return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -18444,7 +18480,7 @@ fn zirArrayInitAnon( if (is_ref) { const target = sema.mod.getTarget(); const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = tuple_ty, + .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -18453,9 +18489,9 @@ fn zirArrayInitAnon( const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = types[i], + .pointee_type = types[i].toType(), }); - if (values[i].ip_index == .unreachable_value) { + if (values[i] == .none) { const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand)); } @@ -18469,7 +18505,7 @@ fn zirArrayInitAnon( element_refs[i] = try sema.resolveInst(operand); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn addConstantMaybeRef( @@ -18532,15 +18568,18 @@ fn fieldType( const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; switch (cur_ty.zigTypeTag(mod)) { - .Struct => { - if (cur_ty.isAnonStruct()) { + .Struct => switch (mod.intern_pool.indexToKey(cur_ty.ip_index)) { + .anon_struct_type => |anon_struct| { const field_index = try 
sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); - return sema.addType(cur_ty.tupleFields().types[field_index]); - } - const struct_obj = mod.typeToStruct(cur_ty).?; - const field = struct_obj.fields.get(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); - return sema.addType(field.ty); + return sema.addType(anon_struct.types[field_index].toType()); + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field = struct_obj.fields.get(field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); + return sema.addType(field.ty); + }, + else => unreachable, }, .Union => { const union_obj = mod.typeToUnion(cur_ty).?; @@ -24697,7 +24736,7 @@ fn structFieldPtr( } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); - } else if (struct_ty.isAnonStruct()) { + } else if (struct_ty.isAnonStruct(mod)) { const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } @@ -24721,11 +24760,11 @@ fn structFieldPtrByIndex( struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - if (struct_ty.isAnonStruct()) { + const mod = sema.mod; + if (struct_ty.isAnonStruct(mod)) { return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing); } - const mod = sema.mod; const struct_obj = mod.typeToStruct(struct_ty).?; const field = struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); @@ -24830,45 +24869,42 @@ fn structFieldVal( assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - switch (struct_ty.ip_index) { - .empty_struct_type => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), - .none => switch (struct_ty.tag()) { - .tuple => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), - .anon_struct => { - const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); - return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); + switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); - const field_index_usize = struct_obj.fields.getIndex(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index = @intCast(u32, field_index_usize); - const field = struct_obj.fields.values()[field_index]; - - if (field.is_comptime) { - return sema.addConstant(field.ty, field.default_val); - } + const field_index_usize = struct_obj.fields.getIndex(field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, 
field_name); + const field_index = @intCast(u32, field_index_usize); + const field = struct_obj.fields.values()[field_index]; - if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { - if (struct_val.isUndef(mod)) return sema.addConstUndef(field.ty); - if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { - return sema.addConstant(field.ty, opv); - } + if (field.is_comptime) { + return sema.addConstant(field.ty, field.default_val); + } - const field_values = struct_val.castTag(.aggregate).?.data; - return sema.addConstant(field.ty, field_values[field_index]); + if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { + if (struct_val.isUndef(mod)) return sema.addConstUndef(field.ty); + if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { + return sema.addConstant(field.ty, opv); } - try sema.requireRuntimeBlock(block, src, null); - return block.addStructFieldVal(struct_byval, field_index, field.ty); - }, - else => unreachable, + const field_values = struct_val.castTag(.aggregate).?.data; + return sema.addConstant(field.ty, field_values[field_index]); + } + + try sema.requireRuntimeBlock(block, src, null); + return block.addStructFieldVal(struct_byval, field_index, field.ty); + }, + .anon_struct_type => |anon_struct| { + if (anon_struct.names.len == 0) { + return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); + } else { + const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); + return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); + } }, + else => unreachable, } } @@ -25931,7 +25967,7 @@ fn coerceExtra( .Union => { // pointer to anonymous struct to pointer to union if (inst_ty.isSinglePointer(mod) and - inst_ty.childType(mod).isAnonStruct() and + inst_ty.childType(mod).isAnonStruct(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src); @@ -25940,7 +25976,7 @@ fn coerceExtra( .Struct => { // pointer to anonymous struct to pointer to struct if (inst_ty.isSinglePointer(mod) and - inst_ty.childType(mod).isAnonStruct() and + inst_ty.childType(mod).isAnonStruct(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) { @@ -26231,7 +26267,7 @@ fn coerceExtra( .Union => switch (inst_ty.zigTypeTag(mod)) { .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { - if (inst_ty.isAnonStruct()) { + if (inst_ty.isAnonStruct(mod)) { return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -28771,8 +28807,8 @@ fn coerceAnonStructToUnion( return sema.failWithOwnedErrorMsg(msg); } - const anon_struct = inst_ty.castTag(.anon_struct).?.data; - const field_name = anon_struct.names[0]; + const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]); const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); } @@ -29010,13 +29046,14 @@ fn coerceTupleToStruct( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); + const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; var runtime_src: ?LazySrcLoc = null; - const 
field_count = inst_ty.structFieldCount(mod); - var field_i: u32 = 0; - while (field_i < field_count) : (field_i += 1) { + for (0..anon_struct.types.len) |field_index_usize| { + const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (inst_ty.castTag(.anon_struct)) |payload| - payload.data.names[field_i] + const field_name = if (anon_struct.names.len != 0) + // https://github.com/ziglang/zig/issues/15709 + @as([]const u8, mod.intern_pool.stringToSlice(anon_struct.names[field_i])) else try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src); @@ -29094,21 +29131,22 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const dest_field_count = tuple_ty.structFieldCount(mod); - const field_vals = try sema.arena.alloc(Value, dest_field_count); + const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.ip_index).anon_struct_type; + const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const inst_field_count = inst_ty.structFieldCount(mod); - if (inst_field_count > dest_field_count) return error.NotCoercible; + const src_tuple = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; - var field_i: u32 = 0; - while (field_i < inst_field_count) : (field_i += 1) { + for (dest_tuple.types, dest_tuple.values, 0..) |field_ty, default_val, field_index_usize| { + const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (inst_ty.castTag(.anon_struct)) |payload| - payload.data.names[field_i] + const field_name = if (src_tuple.names.len != 0) + // https://github.com/ziglang/zig/issues/15709 + @as([]const u8, mod.intern_pool.stringToSlice(src_tuple.names[field_i])) else try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); @@ -29118,23 +29156,21 @@ fn coerceTupleToTuple( const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); - const field_ty = tuple_ty.structFieldType(field_i, mod); - const default_val = tuple_ty.structFieldDefaultValue(field_i, mod); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); - const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); + const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src); field_refs[field_index] = coerced; - if (default_val.ip_index != .unreachable_value) { + if (default_val != .none) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_val, field_ty, sema.mod)) { + if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - field_vals[field_index] = field_val; + field_vals[field_index] = field_val.ip_index; } else { runtime_src = field_src; } @@ -29145,14 +29181,16 @@ fn coerceTupleToTuple( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); 
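// A minimal standalone sketch of the sentinel pattern this function now
// relies on: a tuple type becomes parallel arrays of interned indexes, and a
// field whose value is only runtime-known is marked `.none` instead of the
// old `Value.@"unreachable"` tag. The toy `Index` enum and `hasRuntimeField`
// helper below are hypothetical illustrations assuming only what this patch
// shows about `InternPool.Index`; they are not the real compiler API.
const std = @import("std");

const Index = enum(u32) {
    /// Mirrors `InternPool.Index.none`: no interned value, i.e. the
    /// field's value is only known at runtime.
    none = std.math.maxInt(u32),
    _,
};

/// True if any tuple field lacks a comptime-known value.
fn hasRuntimeField(values: []const Index) bool {
    for (values) |v| {
        if (v == .none) return true;
    }
    return false;
}

test "a .none entry marks a runtime-known tuple field" {
    const values = [_]Index{ @intToEnum(Index, 0), .none };
    try std.testing.expect(hasRuntimeField(&values));
}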
- for (field_refs, 0..) |*field_ref, i| { + for ( + dest_tuple.types, + dest_tuple.values, + field_refs, + 0.., + ) |field_ty, default_val, *field_ref, i| { if (field_ref.* != .none) continue; - const default_val = tuple_ty.structFieldDefaultValue(i, mod); - const field_ty = tuple_ty.structFieldType(i, mod); - const field_src = inst_src; // TODO better source location - if (default_val.ip_index == .unreachable_value) { + if (default_val == .none) { if (tuple_ty.isTuple(mod)) { const template = "missing tuple field: {d}"; if (root_msg) |msg| { @@ -29174,7 +29212,7 @@ fn coerceTupleToTuple( if (runtime_src == null) { field_vals[i] = default_val; } else { - field_ref.* = try sema.addConstant(field_ty, default_val); + field_ref.* = try sema.addConstant(field_ty.toType(), default_val.toValue()); } } @@ -29191,7 +29229,10 @@ fn coerceTupleToTuple( return sema.addConstant( tuple_ty, - try Value.Tag.aggregate.create(sema.arena, field_vals), + (try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty.ip_index, + .fields = field_vals, + } })).toValue(), ); } @@ -31591,17 +31632,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return sema.resolveTypeRequiresComptime(ty.optionalChild(mod)); }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) { - return true; - } - } - return false; - }, - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -31690,6 +31720,16 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, field_val| { + const have_comptime_val = field_val != .none; + if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty.toType())) { + return true; + } + } + return false; + }, + .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); switch (union_obj.requires_comptime) { @@ -31740,20 +31780,16 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeFully(child_ty); }, .Struct => switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - + .none => {}, // TODO make this unreachable when all types are migrated to InternPool + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => return sema.resolveStructFully(ty), + .anon_struct_type => |tuple| { for (tuple.types) |field_ty| { - try sema.resolveTypeFully(field_ty); + try sema.resolveTypeFully(field_ty.toType()); } }, else => {}, }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => return sema.resolveStructFully(ty), - else => {}, - }, }, .Union => return sema.resolveUnionFully(ty), .Array => return sema.resolveTypeFully(ty.childType(mod)), @@ -33038,17 +33074,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) 
|val, i| { - const is_comptime = val.ip_index != .unreachable_value; - if (is_comptime) continue; - if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue; - return null; - } - return Value.empty_struct; - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -33150,7 +33175,36 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } } } - // In this case the struct has no fields and therefore has one possible value. + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + + // TODO: this is incorrect for structs with comptime fields, I think + // we should use a temporary allocator to construct an aggregate that + // is populated with the comptime values and then intern that value here. + // This TODO is repeated for anon_struct_type below, as well as + // in the redundant implementation of one-possible-value in type.zig. + const empty = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .fields = &.{}, + } }); + return empty.toValue(); + }, + + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + const is_comptime = val != .none; + if (is_comptime) continue; + if ((try sema.typeHasOnePossibleValue(field_ty.toType())) != null) continue; + return null; + } + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + + // TODO: this is incorrect for structs with comptime fields, I think + // we should use a temporary allocator to construct an aggregate that + // is populated with the comptime values and then intern that value here. + // This TODO is repeated for struct_type above, as well as + // in the redundant implementation of one-possible-value in type.zig. const empty = try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, .fields = &.{}, @@ -33647,17 +33701,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return sema.typeRequiresComptime(ty.optionalChild(mod)); }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) { - return true; - } - } - return false; - }, - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -33752,6 +33795,15 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, } }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + const have_comptime_val = val != .none; + if (!have_comptime_val and try sema.typeRequiresComptime(field_ty.toType())) { + return true; + } + } + return false; + }, .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); @@ -33865,7 +33917,7 @@ fn structFieldIndex( ) !u32 { const mod = sema.mod; const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - if (struct_ty.isAnonStruct()) { + if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { const struct_obj = mod.typeToStruct(struct_ty).?; @@ -33882,9 +33934,10 @@ fn anonStructFieldIndex( field_name: []const u8, field_src: LazySrcLoc, ) !u32 { - const anon_struct = struct_ty.castTag(.anon_struct).?.data; + const mod = sema.mod; + const anon_struct = mod.intern_pool.indexToKey(struct_ty.ip_index).anon_struct_type; for (anon_struct.names, 0..) 
|name, i| { - if (mem.eql(u8, name, field_name)) { + if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { return @intCast(u32, i); } } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index a18f49b96ffe..ced20ac52212 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -177,13 +177,16 @@ pub fn print( } if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { - switch (field_ptr.container_ty.tag()) { - .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), - else => { - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); - return writer.print(".{s}", .{field_name}); + switch (mod.intern_pool.indexToKey(field_ptr.container_ty.ip_index)) { + .anon_struct_type => |anon_struct| { + if (anon_struct.names.len == 0) { + return writer.print(".@\"{d}\"", .{field_ptr.field_index}); + } }, + else => {}, } + const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); + return writer.print(".{s}", .{field_name}); } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index]; return writer.print(".{s}", .{field_name}); @@ -396,12 +399,9 @@ fn printAggregate( while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); switch (ty.ip_index) { - .none => switch (ty.tag()) { - .anon_struct => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), - else => {}, - }, + .none => {}, // TODO make this unreachable after finishing InternPool migration else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), + .struct_type, .anon_struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), else => {}, }, } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 7e2e37667e18..30c324836008 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -11411,7 +11411,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const union_obj = mod.typeToUnion(union_ty).?; const field_name = union_obj.fields.keys()[extra.field_index]; const tag_ty = union_obj.tag_ty; - const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const field_index = tag_ty.enumFieldIndex(field_name, mod).?; const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); const tag_int_val = try tag_val.enumToInt(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2ee7dab2fea5..f45c17822386 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -3417,8 +3417,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const op_inst = Air.refToIndex(un_op); const op_ty = f.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty; - var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); + const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] 
== .call_always_tail) { try reap(f, inst, &.{un_op}); @@ -4115,8 +4114,7 @@ fn airCall( } resolved_arg.* = try f.resolveInst(arg); if (arg_cty != try f.typeToIndex(arg_ty, .complete)) { - var lowered_arg_buf: LowerFnRetTyBuffer = undefined; - const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, mod); + const lowered_arg_ty = try lowerFnRetTy(arg_ty, mod); const array_local = try f.allocLocal(inst, lowered_arg_ty); try writer.writeAll("memcpy("); @@ -4146,8 +4144,7 @@ fn airCall( }; const ret_ty = fn_ty.fnReturnType(); - var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); + const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); const result_local = result: { if (modifier == .always_tail) { @@ -5200,7 +5197,7 @@ fn fieldLocation( const field_ty = container_ty.structFieldType(next_field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - break .{ .field = if (container_ty.isSimpleTuple()) + break .{ .field = if (container_ty.isSimpleTuple(mod)) .{ .field = next_field_index } else .{ .identifier = container_ty.structFieldName(next_field_index, mod) } }; @@ -5395,16 +5392,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const field_name: CValue = switch (struct_ty.ip_index) { .none => switch (struct_ty.tag()) { - .tuple, .anon_struct => if (struct_ty.isSimpleTuple()) - .{ .field = extra.field_index } - else - .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, - else => unreachable, }, else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { .struct_type => switch (struct_ty.containerLayout(mod)) { - .Auto, .Extern => if (struct_ty.isSimpleTuple()) + .Auto, .Extern => if (struct_ty.isSimpleTuple(mod)) .{ .field = extra.field_index } else .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, @@ -5465,6 +5457,12 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { return local; }, }, + + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0) + .{ .field = extra.field_index } + else + .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .union_type => |union_type| field_name: { const union_obj = mod.unionPtr(union_type.index); if (union_obj.layout == .Packed) { @@ -6791,7 +6789,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const a = try Assignment.start(f, writer, field_ty); - try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple()) + try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod)) .{ .field = field_i } else .{ .identifier = inst_ty.structFieldName(field_i, mod) }); @@ -7704,25 +7702,21 @@ const Vectorize = struct { } }; -const LowerFnRetTyBuffer = struct { - names: [1][]const u8, - types: [1]Type, - values: [1]Value, - payload: Type.Payload.AnonStruct, -}; -fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *Module) Type { - if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn; +fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type { + if (ret_ty.ip_index == .noreturn_type) return Type.noreturn; if (lowersToArray(ret_ty, mod)) { - buffer.names = [1][]const u8{"array"}; - buffer.types = [1]Type{ret_ty}; - buffer.values = [1]Value{Value.@"unreachable"}; - buffer.payload = .{ .data = .{ - .names = &buffer.names, - .types = &buffer.types, - .values = &buffer.values, - } }; - return Type.initPayload(&buffer.payload.base); + const names = 
[1]InternPool.NullTerminatedString{ + try mod.intern_pool.getOrPutString(mod.gpa, "array"), + }; + const types = [1]InternPool.Index{ret_ty.ip_index}; + const values = [1]InternPool.Index{.none}; + const interned = try mod.intern(.{ .anon_struct_type = .{ + .names = &names, + .types = &types, + .values = &values, + } }); + return interned.toType(); } return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void; diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index bcb4b92228d7..b51d81a30b22 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1951,7 +1951,7 @@ pub const CType = extern union { defer c_field_i += 1; fields_pl[c_field_i] = .{ - .name = try if (ty.isSimpleTuple()) + .name = try if (ty.isSimpleTuple(mod)) std.fmt.allocPrintZ(arena, "f{}", .{field_i}) else arena.dupeZ(u8, switch (zig_ty_tag) { @@ -2102,7 +2102,7 @@ pub const CType = extern union { .payload => unreachable, }) or !mem.eql( u8, - if (ty.isSimpleTuple()) + if (ty.isSimpleTuple(mod)) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { .Struct => ty.structFieldName(field_i, mod), @@ -2224,7 +2224,7 @@ pub const CType = extern union { .global => .global, .payload => unreachable, }); - hasher.update(if (ty.isSimpleTuple()) + hasher.update(if (ty.isSimpleTuple(mod)) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { .Struct => ty.structFieldName(field_i, mod), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index e485b58c3569..3289d389b410 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2009,83 +2009,84 @@ pub const Object = struct { break :blk fwd_decl; }; - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - - var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; - defer di_fields.deinit(gpa); - - try di_fields.ensureUnusedCapacity(gpa, tuple.types.len); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - - for (tuple.types, 0..) |field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - - const field_size = field_ty.abiSize(mod); - const field_align = field_ty.abiAlignment(mod); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); - offset = field_offset + field_size; - - const field_name = if (ty.castTag(.anon_struct)) |payload| - try gpa.dupeZ(u8, payload.data.names[i]) - else - try std.fmt.allocPrintZ(gpa, "{d}", .{i}); - defer gpa.free(field_name); + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |tuple| { + var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; + defer di_fields.deinit(gpa); + + try di_fields.ensureUnusedCapacity(gpa, tuple.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + + for (tuple.types, tuple.values, 0..) 
|field_ty, field_val, i| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; + + const field_size = field_ty.toType().abiSize(mod); + const field_align = field_ty.toType().abiAlignment(mod); + const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = field_offset + field_size; + + const field_name = if (tuple.names.len != 0) + mod.intern_pool.stringToSlice(tuple.names[i]) + else + try std.fmt.allocPrintZ(gpa, "{d}", .{i}); + defer gpa.free(field_name); + + try di_fields.append(gpa, dib.createMemberType( + fwd_decl.toScope(), + field_name, + null, // file + 0, // line + field_size * 8, // size in bits + field_align * 8, // align in bits + field_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(field_ty.toType(), .full), + )); + } - try di_fields.append(gpa, dib.createMemberType( - fwd_decl.toScope(), - field_name, + const full_di_ty = dib.createStructType( + compile_unit_scope, + name.ptr, null, // file 0, // line - field_size * 8, // size in bits - field_align * 8, // align in bits - field_offset * 8, // offset in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags - try o.lowerDebugType(field_ty, .full), - )); - } - - const full_di_ty = dib.createStructType( - compile_unit_scope, - name.ptr, - null, // file - 0, // line - ty.abiSize(mod) * 8, // size in bits - ty.abiAlignment(mod) * 8, // align in bits - 0, // flags - null, // derived from - di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), - 0, // run time lang - null, // vtable holder - "", // unique id - ); - dib.replaceTemporary(fwd_decl, full_di_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); - return full_di_ty; - } - - if (mod.typeToStruct(ty)) |struct_obj| { - if (!struct_obj.haveFieldTypes()) { - // This can happen if a struct type makes it all the way to - // flush() without ever being instantiated or referenced (even - // via pointer). The only reason we are hearing about it now is - // that it is being used as a namespace to put other debug types - // into. Therefore we can satisfy this by making an empty namespace, - // rather than changing the frontend to unnecessarily resolve the - // struct field types. - const owner_decl_index = ty.getOwnerDecl(mod); - const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); - dib.replaceTemporary(fwd_decl, struct_di_ty); - // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` - // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); - return struct_di_ty; - } + null, // derived from + di_fields.items.ptr, + @intCast(c_int, di_fields.items.len), + 0, // run time lang + null, // vtable holder + "", // unique id + ); + dib.replaceTemporary(fwd_decl, full_di_ty); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + return full_di_ty; + }, + .struct_type => |struct_type| s: { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s; + + if (!struct_obj.haveFieldTypes()) { + // This can happen if a struct type makes it all the way to + // flush() without ever being instantiated or referenced (even + // via pointer). 
The only reason we are hearing about it now is + // that it is being used as a namespace to put other debug types + // into. Therefore we can satisfy this by making an empty namespace, + // rather than changing the frontend to unnecessarily resolve the + // struct field types. + const owner_decl_index = ty.getOwnerDecl(mod); + const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); + dib.replaceTemporary(fwd_decl, struct_di_ty); + // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` + // means we can't use `gop` anymore. + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); + return struct_di_ty; + } + }, + else => {}, } if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -2931,59 +2932,61 @@ pub const DeclGen = struct { // reference, we need to copy it here. gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - if (t.isSimpleTupleOrAnonStruct()) { - const tuple = t.tupleFields(); - const llvm_struct_ty = dg.context.structCreateNamed(""); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) { + .anon_struct_type => |tuple| { + const llvm_struct_ty = dg.context.structCreateNamed(""); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); + var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; + defer llvm_field_types.deinit(gpa); - try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); + try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; - for (tuple.types, 0..) 
|field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + for (tuple.types, tuple.values) |field_ty, field_val| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); - } - const field_llvm_ty = try dg.lowerType(field_ty); - try llvm_field_types.append(gpa, field_llvm_ty); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } + const field_llvm_ty = try dg.lowerType(field_ty.toType()); + try llvm_field_types.append(gpa, field_llvm_ty); - offset += field_ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } } - } - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), - .False, - ); + llvm_struct_ty.structSetBody( + llvm_field_types.items.ptr, + @intCast(c_uint, llvm_field_types.items.len), + .False, + ); - return llvm_struct_ty; - } + return llvm_struct_ty; + }, + .struct_type => |struct_type| struct_type, + else => unreachable, + }; - const struct_obj = mod.typeToStruct(t).?; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -3625,71 +3628,74 @@ pub const DeclGen = struct { const field_vals = tv.val.castTag(.aggregate).?.data; const gpa = dg.gpa; - if (tv.ty.isSimpleTupleOrAnonStruct()) { - const tuple = tv.ty.tupleFields(); - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; - for (tuple.types, 0..) 
|field_ty, i| { - if (tuple.values[i].ip_index != .unreachable_value) continue; - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - const field_align = field_ty.abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty, - .val = field_vals[i], - }); + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - llvm_fields.appendAssumeCapacity(field_llvm_val); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = field_vals[i], + }); - offset += field_ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - } + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + .struct_type => |struct_type| struct_type, + else => unreachable, + }; - const struct_obj = mod.typeToStruct(tv.ty).?; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -4077,13 +4083,11 @@ pub const DeclGen = struct { return field_addr.constIntToPtr(final_llvm_ty); } - var ty_buf: Type.Payload.Pointer = undefined; - const parent_llvm_ty = try dg.lowerType(parent_ty); - if 
(llvmFieldIndex(parent_ty, field_index, mod, &ty_buf)) |llvm_field_index| { + if (llvmField(parent_ty, field_index, mod)) |llvm_field| { const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field_index, .False), + llvm_u32.constInt(llvm_field.index, .False), }; return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); } else { @@ -6006,8 +6010,7 @@ pub const FuncGen = struct { return self.builder.buildTrunc(shifted_value, elem_llvm_ty, ""); }, else => { - var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; + const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index; return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, ""); }, }, @@ -6035,16 +6038,22 @@ pub const FuncGen = struct { switch (struct_ty.zigTypeTag(mod)) { .Struct => { assert(struct_ty.containerLayout(mod) != .Packed); - var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; + const llvm_field = llvmField(struct_ty, field_index, mod).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, ""); - const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); + const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); + const field_ptr_ty = try mod.ptrType(.{ + .elem_type = llvm_field.ty.ip_index, + .alignment = llvm_field.alignment, + }); if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; - return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(mod), false); + const field_alignment = if (llvm_field.alignment != 0) + llvm_field.alignment + else + llvm_field.ty.abiAlignment(mod); + return self.loadByRef(field_ptr, field_ty, field_alignment, false); } else { return self.load(field_ptr, field_ptr_ty); } @@ -6912,12 +6921,14 @@ pub const FuncGen = struct { const struct_ty = self.air.getRefType(ty_pl.ty); const field_index = ty_pl.payload; - var ptr_ty_buf: Type.Payload.Pointer = undefined; const mod = self.dg.module; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; + const llvm_field = llvmField(struct_ty, field_index, mod).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, ""); - const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); + const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); + const field_ptr_ty = try mod.ptrType(.{ + .elem_type = llvm_field.ty.ip_index, + .alignment = llvm_field.alignment, + }); return self.load(field_ptr, field_ptr_ty); } @@ -7430,9 +7441,8 @@ pub const FuncGen = struct { const result = self.builder.buildExtractValue(result_struct, 0, ""); const overflow_bit = self.builder.buildExtractValue(result_struct, 1, ""); - var ty_buf: Type.Payload.Pointer = undefined; - const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?; - const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?; + const result_index = llvmField(dest_ty, 0, mod).?.index; + const overflow_index = llvmField(dest_ty, 1, mod).?.index; if (isByRef(dest_ty, mod)) { const result_alignment = dest_ty.abiAlignment(mod); @@ -7736,9 +7746,8 @@ pub const FuncGen = struct { const overflow_bit = 
self.builder.buildICmp(.NE, lhs, reconstructed, ""); - var ty_buf: Type.Payload.Pointer = undefined; - const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?; - const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?; + const result_index = llvmField(dest_ty, 0, mod).?.index; + const overflow_index = llvmField(dest_ty, 1, mod).?.index; if (isByRef(dest_ty, mod)) { const result_alignment = dest_ty.abiAlignment(mod); @@ -9300,8 +9309,6 @@ pub const FuncGen = struct { return running_int; } - var ptr_ty_buf: Type.Payload.Pointer = undefined; - if (isByRef(result_ty, mod)) { const llvm_u32 = self.context.intType(32); // TODO in debug builds init to undef so that the padding will be 0xaa @@ -9313,7 +9320,7 @@ pub const FuncGen = struct { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; + const llvm_i = llvmField(result_ty, i, mod).?.index; indices[1] = llvm_u32.constInt(llvm_i, .False); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); var field_ptr_payload: Type.Payload.Pointer = .{ @@ -9334,7 +9341,7 @@ pub const FuncGen = struct { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; + const llvm_i = llvmField(result_ty, i, mod).?.index; result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, ""); } return result; @@ -9796,9 +9803,8 @@ pub const FuncGen = struct { else => { const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty); - var ty_buf: Type.Payload.Pointer = undefined; - if (llvmFieldIndex(struct_ty, field_index, mod, &ty_buf)) |llvm_field_index| { - return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, ""); + if (llvmField(struct_ty, field_index, mod)) |llvm_field| { + return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, ""); } else { // If we found no index then this means this is a zero sized field at the // end of the struct. Treat our struct pointer as an array of two and get @@ -10457,59 +10463,61 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ }; } +const LlvmField = struct { + index: c_uint, + ty: Type, + alignment: u32, +}; + /// Take into account 0 bit fields and padding. Returns null if an llvm /// field could not be found. /// This only happens if you want the field index of a zero sized field at /// the end of the struct. -fn llvmFieldIndex( - ty: Type, - field_index: usize, - mod: *Module, - ptr_pl_buf: *Type.Payload.Pointer, -) ?c_uint { +fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { // Detects where we inserted extra padding fields so that we can skip // over them in this function. comptime assert(struct_layout_version == 2); var offset: u64 = 0; var big_align: u32 = 0; - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - var llvm_field_index: c_uint = 0; - for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_field_index: c_uint = 0; + for (tuple.types, tuple.values, 0..) 
|field_ty, field_val, i| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - llvm_field_index += 1; - } + const padding_len = offset - prev_offset; + if (padding_len > 0) { + llvm_field_index += 1; + } - if (field_index <= i) { - ptr_pl_buf.* = .{ - .data = .{ - .pointee_type = field_ty, - .@"align" = field_align, - .@"addrspace" = .generic, - }, - }; - return llvm_field_index; - } + if (field_index <= i) { + return .{ + .index = llvm_field_index, + .ty = field_ty.toType(), + .alignment = field_align, + }; + } - llvm_field_index += 1; - offset += field_ty.abiSize(mod); - } - return null; - } - const layout = ty.containerLayout(mod); + llvm_field_index += 1; + offset += field_ty.toType().abiSize(mod); + } + return null; + }, + .struct_type => |s| s, + else => unreachable, + }; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const layout = struct_obj.layout; assert(layout != .Packed); var llvm_field_index: c_uint = 0; - var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod); + var it = struct_obj.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; const field_align = field.alignment(mod, layout); @@ -10523,14 +10531,11 @@ fn llvmFieldIndex( } if (field_index == field_and_index.index) { - ptr_pl_buf.* = .{ - .data = .{ - .pointee_type = field.ty, - .@"align" = field_align, - .@"addrspace" = .generic, - }, + return .{ + .index = llvm_field_index, + .ty = field.ty, + .alignment = field_align, }; - return llvm_field_index; } llvm_field_index += 1; @@ -11089,21 +11094,24 @@ fn isByRef(ty: Type, mod: *Module) bool { .Struct => { // Packed structs are represented to LLVM as integers. if (ty.containerLayout(mod) == .Packed) return false; - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - var count: usize = 0; - for (tuple.values, 0..) 
|field_val, i| { - if (field_val.ip_index != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; - - count += 1; - if (count > max_fields_byval) return true; - if (isByRef(tuple.types[i], mod)) return true; - } - return false; - } + const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |tuple| { + var count: usize = 0; + for (tuple.types, tuple.values) |field_ty, field_val| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; + + count += 1; + if (count > max_fields_byval) return true; + if (isByRef(field_ty.toType(), mod)) return true; + } + return false; + }, + .struct_type => |s| s, + else => unreachable, + }; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; var count: usize = 0; - const fields = ty.structFields(mod); - for (fields.values()) |field| { + for (struct_obj.fields.values()) |field| { if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; count += 1; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index a81e36fefabc..32ea975b6425 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -682,7 +682,7 @@ pub const DeclGen = struct { else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), }, .Struct => { - if (ty.isSimpleTupleOrAnonStruct()) { + if (ty.isSimpleTupleOrAnonStruct(mod)) { unreachable; // TODO } else { const struct_ty = mod.typeToStruct(ty).?; @@ -1319,7 +1319,8 @@ pub const DeclGen = struct { defer self.gpa.free(member_names); var member_index: usize = 0; - for (struct_ty.fields.values(), 0..) |field, i| { + const struct_obj = void; // TODO + for (struct_obj.fields.values(), 0..) |field, i| { if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field.ty, .indirect); @@ -1327,7 +1328,7 @@ pub const DeclGen = struct { member_index += 1; } - const name = try struct_ty.getFullyQualifiedName(self.module); + const name = try struct_obj.getFullyQualifiedName(self.module); defer self.module.gpa.free(name); return try self.spv.resolve(.{ .struct_type = .{ @@ -2090,7 +2091,7 @@ pub const DeclGen = struct { var i: usize = 0; while (i < mask_len) : (i += 1) { - const elem = try mask.elemValue(self.module, i); + const elem = try mask.elemValue(mod, i); if (elem.isUndef(mod)) { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { @@ -2805,7 +2806,7 @@ pub const DeclGen = struct { const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index e20e127800c6..b9722f8c95bc 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -333,13 +333,12 @@ pub const DeclState = struct { // DW.AT.byte_size, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); - switch (ty.tag()) { - .tuple, .anon_struct => { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |fields| { // DW.AT.name, DW.FORM.string try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); - const fields = ty.tupleFields(); - for (fields.types, 0..) |field, field_index| { + for (fields.types, 0..) 
|field_ty, field_index| { // DW.AT.member try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member)); // DW.AT.name, DW.FORM.string @@ -347,28 +346,30 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, - else => { + .struct_type => |struct_type| s: { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s; // DW.AT.name, DW.FORM.string const struct_name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendAssumeCapacity(0); - const struct_obj = mod.typeToStruct(ty).?; if (struct_obj.layout == .Packed) { log.debug("TODO implement .debug_info for packed structs", .{}); break :blk; } - const fields = ty.structFields(mod); - for (fields.keys(), 0..) |field_name, field_index| { - const field = fields.get(field_name).?; + for ( + struct_obj.fields.keys(), + struct_obj.fields.values(), + 0.., + ) |field_name, field, field_index| { if (!field.ty.hasRuntimeBits(mod)) continue; // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2); @@ -385,6 +386,7 @@ pub const DeclState = struct { try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, + else => unreachable, } // DW.AT.structure_type delimit children diff --git a/src/type.zig b/src/type.zig index d051191bfe30..ee9e7c8e1700 100644 --- a/src/type.zig +++ b/src/type.zig @@ -54,10 +54,6 @@ pub const Type = struct { .error_union => return .ErrorUnion, .anyframe_T => return .AnyFrame, - - .tuple, - .anon_struct, - => return .Struct, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return .Int, @@ -66,7 +62,7 @@ pub const Type = struct { .vector_type => return .Vector, .opt_type => return .Optional, .error_union_type => return .ErrorUnion, - .struct_type => return .Struct, + .struct_type, .anon_struct_type => return .Struct, .union_type => return .Union, .opaque_type => return .Opaque, .enum_type => return .Enum, @@ -465,76 +461,6 @@ pub const Type = struct { if (b.zigTypeTag(mod) != .AnyFrame) return false; return a.elemType2(mod).eql(b.elemType2(mod), mod); }, - - .tuple => { - if (!b.isSimpleTuple()) return false; - - const a_tuple = a.tupleFields(); - const b_tuple = b.tupleFields(); - - if (a_tuple.types.len != b_tuple.types.len) return false; - - for (a_tuple.types, 0..) |a_ty, i| { - const b_ty = b_tuple.types[i]; - if (!eql(a_ty, b_ty, mod)) return false; - } - - for (a_tuple.values, 0..) |a_val, i| { - const ty = a_tuple.types[i]; - const b_val = b_tuple.values[i]; - if (a_val.ip_index == .unreachable_value) { - if (b_val.ip_index == .unreachable_value) { - continue; - } else { - return false; - } - } else { - if (b_val.ip_index == .unreachable_value) { - return false; - } else { - if (!Value.eql(a_val, b_val, ty, mod)) return false; - } - } - } - - return true; - }, - .anon_struct => { - const a_struct_obj = a.castTag(.anon_struct).?.data; - const b_struct_obj = (b.castTag(.anon_struct) orelse return false).data; - - if (a_struct_obj.types.len != b_struct_obj.types.len) return false; - - for (a_struct_obj.names, 0..) 
|a_name, i| { - const b_name = b_struct_obj.names[i]; - if (!std.mem.eql(u8, a_name, b_name)) return false; - } - - for (a_struct_obj.types, 0..) |a_ty, i| { - const b_ty = b_struct_obj.types[i]; - if (!eql(a_ty, b_ty, mod)) return false; - } - - for (a_struct_obj.values, 0..) |a_val, i| { - const ty = a_struct_obj.types[i]; - const b_val = b_struct_obj.values[i]; - if (a_val.ip_index == .unreachable_value) { - if (b_val.ip_index == .unreachable_value) { - continue; - } else { - return false; - } - } else { - if (b_val.ip_index == .unreachable_value) { - return false; - } else { - if (!Value.eql(a_val, b_val, ty, mod)) return false; - } - } - } - - return true; - }, } } @@ -641,34 +567,6 @@ pub const Type = struct { std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); hashWithHasher(ty.childType(mod), hasher, mod); }, - - .tuple => { - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - - const tuple = ty.tupleFields(); - std.hash.autoHash(hasher, tuple.types.len); - - for (tuple.types, 0..) |field_ty, i| { - hashWithHasher(field_ty, hasher, mod); - const field_val = tuple.values[i]; - if (field_val.ip_index == .unreachable_value) continue; - field_val.hash(field_ty, hasher, mod); - } - }, - .anon_struct => { - const struct_obj = ty.castTag(.anon_struct).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - std.hash.autoHash(hasher, struct_obj.types.len); - - for (struct_obj.types, 0..) |field_ty, i| { - const field_name = struct_obj.names[i]; - const field_val = struct_obj.values[i]; - hasher.update(field_name); - hashWithHasher(field_ty, hasher, mod); - if (field_val.ip_index == .unreachable_value) continue; - field_val.hash(field_ty, hasher, mod); - } - }, } } @@ -733,41 +631,6 @@ pub const Type = struct { }; }, - .tuple => { - const payload = self.castTag(.tuple).?.data; - const types = try allocator.alloc(Type, payload.types.len); - const values = try allocator.alloc(Value, payload.values.len); - for (payload.types, 0..) |ty, i| { - types[i] = try ty.copy(allocator); - } - for (payload.values, 0..) |val, i| { - values[i] = try val.copy(allocator); - } - return Tag.tuple.create(allocator, .{ - .types = types, - .values = values, - }); - }, - .anon_struct => { - const payload = self.castTag(.anon_struct).?.data; - const names = try allocator.alloc([]const u8, payload.names.len); - const types = try allocator.alloc(Type, payload.types.len); - const values = try allocator.alloc(Value, payload.values.len); - for (payload.names, 0..) |name, i| { - names[i] = try allocator.dupe(u8, name); - } - for (payload.types, 0..) |ty, i| { - types[i] = try ty.copy(allocator); - } - for (payload.values, 0..) |val, i| { - values[i] = try val.copy(allocator); - } - return Tag.anon_struct.create(allocator, .{ - .names = names, - .types = types, - .values = values, - }); - }, .function => { const payload = self.castTag(.function).?.data; const param_types = try allocator.alloc(Type, payload.param_types.len); @@ -935,42 +798,6 @@ pub const Type = struct { ty = return_type; continue; }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - try writer.writeAll("tuple{"); - for (tuple.types, 0..) 
|field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try field_ty.dump("", .{}, writer); - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtDebug()}); - } - } - try writer.writeAll("}"); - return; - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - try writer.writeAll("struct{"); - for (anon_struct.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = anon_struct.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try writer.writeAll(anon_struct.names[i]); - try writer.writeAll(": "); - try field_ty.dump("", .{}, writer); - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtDebug()}); - } - } - try writer.writeAll("}"); - return; - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); @@ -1131,45 +958,6 @@ pub const Type = struct { try print(error_union.payload, writer, mod); }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - - try writer.writeAll("tuple{"); - for (tuple.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try print(field_ty, writer, mod); - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); - } - } - try writer.writeAll("}"); - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - - try writer.writeAll("struct{"); - for (anon_struct.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = anon_struct.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try writer.writeAll(anon_struct.names[i]); - try writer.writeAll(": "); - - try print(field_ty, writer, mod); - - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); - } - } - try writer.writeAll("}"); - }, - .pointer => { const info = ty.ptrInfo(mod); @@ -1335,6 +1123,27 @@ pub const Type = struct { try writer.writeAll("@TypeOf(.{})"); } }, + .anon_struct_type => |anon_struct| { + try writer.writeAll("struct{"); + for (anon_struct.types, anon_struct.values, 0..) |field_ty, val, i| { + if (i != 0) try writer.writeAll(", "); + if (val != .none) { + try writer.writeAll("comptime "); + } + if (anon_struct.names.len != 0) { + const name = mod.intern_pool.stringToSlice(anon_struct.names[i]); + try writer.writeAll(name); + try writer.writeAll(": "); + } + + try print(field_ty.toType(), writer, mod); + + if (val != .none) { + try writer.print(" = {}", .{val.toValue().fmtValue(field_ty.toType(), mod)}); + } + } + try writer.writeAll("}"); + }, .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); @@ -1443,16 +1252,6 @@ pub const Type = struct { } }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) 
|field_ty, i| { - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) continue; // comptime field - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; - } - return false; - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -1567,6 +1366,13 @@ pub const Type = struct { return false; } }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + if (val != .none) continue; // comptime field + if (try field_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; + } + return false; + }, .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); @@ -1634,8 +1440,6 @@ pub const Type = struct { .function, .error_union, .anyframe_T, - .tuple, - .anon_struct, => false, .inferred_alloc_mut => unreachable, @@ -1705,6 +1509,7 @@ pub const Type = struct { }; return struct_obj.layout != .Auto; }, + .anon_struct_type => false, .union_type => |union_type| switch (union_type.runtime_tag) { .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, .tagged => false, @@ -1923,26 +1728,6 @@ pub const Type = struct { .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - var big_align: u32 = 0; - for (tuple.types, 0..) |field_ty, i| { - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) continue; // comptime field - if (!(field_ty.hasRuntimeBits(mod))) continue; - - switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |field_align| big_align = @max(big_align, field_align), - .val => switch (strat) { - .eager => unreachable, // field type alignment not resolved - .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, - } - } - return AbiAlignmentAdvanced{ .scalar = big_align }; - }, - .inferred_alloc_const, .inferred_alloc_mut, => unreachable, @@ -2100,6 +1885,24 @@ pub const Type = struct { } return AbiAlignmentAdvanced{ .scalar = big_align }; }, + .anon_struct_type => |tuple| { + var big_align: u32 = 0; + for (tuple.types, tuple.values) |field_ty, val| { + if (val != .none) continue; // comptime field + if (!(field_ty.toType().hasRuntimeBits(mod))) continue; + + switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) { + .scalar => |field_align| big_align = @max(big_align, field_align), + .val => switch (strat) { + .eager => unreachable, // field type alignment not resolved + .sema => unreachable, // passed to abiAlignmentAdvanced above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }, + } + } + return AbiAlignmentAdvanced{ .scalar = big_align }; + }, + .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); @@ -2287,18 +2090,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .tuple, .anon_struct => { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy, .eager => {}, - } - const field_count = ty.structFieldCount(mod); - if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; - }, - .anyframe_T => return 
AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .pointer => switch (ty.castTag(.pointer).?.data.size) { @@ -2496,6 +2287,18 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, }, + .anon_struct_type => |tuple| { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy, .eager => {}, + } + const field_count = tuple.types.len; + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + }, + .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); @@ -2609,18 +2412,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .tuple, .anon_struct => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout(mod) != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - var total: u64 = 0; - for (ty.tupleFields().types) |field_ty| { - total += try bitSizeAdvanced(field_ty, mod, opt_sema); - } - return total; - }, - .anyframe_T => return target.ptrBitWidth(), .pointer => switch (ty.castTag(.pointer).?.data.size) { @@ -2724,6 +2515,11 @@ pub const Type = struct { return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); }, + .anon_struct_type => { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + }, + .union_type => |union_type| { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); if (ty.containerLayout(mod) != .Packed) { @@ -3220,23 +3016,17 @@ pub const Type = struct { } pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { - return switch (ty.ip_index) { - .empty_struct_type => .Auto, - .none => switch (ty.tag()) { - .tuple, .anon_struct => .Auto, - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; + return struct_obj.layout; }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; - return struct_obj.layout; - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.layout; - }, - else => unreachable, + .anon_struct_type => .Auto, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.layout; }, + else => unreachable, }; } @@ -3349,23 +3139,16 @@ pub const Type = struct { } pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { - return switch (ty.ip_index) { - .empty_struct_type => 0, - .none => switch (ty.tag()) { - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - - else => unreachable, - }, - else => switch (ip.indexToKey(ty.ip_index)) { - .vector_type => |vector_type| vector_type.len, - .array_type => |array_type| array_type.len, - .struct_type => |struct_type| { - const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0; - return struct_obj.fields.count(); - }, - else => unreachable, + return switch (ip.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + .array_type => |array_type| array_type.len, + .struct_type => |struct_type| { + const 
struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0; + return struct_obj.fields.count(); }, + .anon_struct_type => |tuple| tuple.types.len, + + else => unreachable, }; } @@ -3374,16 +3157,10 @@ pub const Type = struct { } pub fn vectorLen(ty: Type, mod: *const Module) u32 { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), - .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .vector_type => |vector_type| vector_type.len, - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + .anon_struct_type => |tuple| @intCast(u32, tuple.types.len), + else => unreachable, }; } @@ -3391,8 +3168,6 @@ pub const Type = struct { pub fn sentinel(ty: Type, mod: *const Module) ?Value { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .tuple => null, - .pointer => ty.castTag(.pointer).?.data.sentinel, else => unreachable, @@ -3400,6 +3175,7 @@ pub const Type = struct { else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .vector_type, .struct_type, + .anon_struct_type, => null, .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, @@ -3486,10 +3262,12 @@ pub const Type = struct { ty = struct_obj.backing_int_ty; }, .enum_type => |enum_type| ty = enum_type.tag_ty.toType(), + .vector_type => |vector_type| ty = vector_type.child.toType(), + + .anon_struct_type => unreachable, .ptr_type => unreachable, .array_type => unreachable, - .vector_type => |vector_type| ty = vector_type.child.toType(), .opt_type => unreachable, .error_union_type => unreachable, @@ -3711,17 +3489,6 @@ pub const Type = struct { } }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) |val, i| { - const is_comptime = val.ip_index != .unreachable_value; - if (is_comptime) continue; - if ((try tuple.types[i].onePossibleValue(mod)) != null) continue; - return null; - } - return Value.empty_struct; - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -3810,7 +3577,33 @@ pub const Type = struct { return null; } } - // In this case the struct has no fields and therefore has one possible value. + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + + // TODO: this is incorrect for structs with comptime fields, I think + // we should use a temporary allocator to construct an aggregate that + // is populated with the comptime values and then intern that value here. + // This TODO is repeated for anon_struct_type below, as well as in + // the redundant implementation of one-possible-value logic in Sema.zig. + const empty = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .fields = &.{}, + } }); + return empty.toValue(); + }, + + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + if (val != .none) continue; // comptime field + if ((try field_ty.toType().onePossibleValue(mod)) != null) continue; + return null; + } + + // TODO: this is incorrect for structs with comptime fields, I think + // we should use a temporary allocator to construct an aggregate that + // is populated with the comptime values and then intern that value here. + // This TODO is repeated for struct_type above, as well as in + // the redundant implementation of one-possible-value logic in Sema.zig. 
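+                // A possible shape for that fix (hypothetical, untested sketch;
+                // `arena` stands in for whatever temporary allocator ends up
+                // being chosen):
+                //
+                //   const field_vals = try arena.alloc(InternPool.Index, struct_obj.fields.count());
+                //   for (struct_obj.fields.values(), field_vals) |field, *dest| dest.* =
+                //       if (field.is_comptime)
+                //           field.default_val.ip_index
+                //       else
+                //           ((try field.ty.onePossibleValue(mod)) orelse return null).ip_index;
+                //   return (try mod.intern(.{ .aggregate = .{
+                //       .ty = ty.ip_index,
+                //       .fields = field_vals,
+                //   } })).toValue();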
const empty = try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, .fields = &.{}, @@ -3915,15 +3708,6 @@ pub const Type = struct { return ty.optionalChild(mod).comptimeOnly(mod); }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; - } - return false; - }, - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -4007,6 +3791,14 @@ pub const Type = struct { } }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + const have_comptime_val = val != .none; + if (!have_comptime_val and field_ty.toType().comptimeOnly(mod)) return true; + } + return false; + }, + .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); switch (union_obj.requires_comptime) { @@ -4275,171 +4067,116 @@ pub const Type = struct { } pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index], - else => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields.keys()[field_index]; }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields.keys()[field_index]; - }, - else => unreachable, + .anon_struct_type => |anon_struct| { + const name = anon_struct.names[field_index]; + return mod.intern_pool.stringToSlice(name); }, + else => unreachable, } } pub fn structFieldCount(ty: Type, mod: *Module) usize { - return switch (ty.ip_index) { - .empty_struct_type => 0, - .none => switch (ty.tag()) { - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields.count(); - }, - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields.count(); }, + .anon_struct_type => |anon_struct| anon_struct.types.len, + else => unreachable, }; } /// Supports structs and unions. 
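+    /// For tuples and anonymous structs, the field type is read from the
+    /// `anon_struct_type` key in the intern pool.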
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => return ty.castTag(.tuple).?.data.types[index], - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index], - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.fields.values()[index].ty; }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - return struct_obj.fields.values()[index].ty; - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.fields.values()[index].ty; - }, - else => unreachable, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.fields.values()[index].ty; }, + .anon_struct_type => |anon_struct| anon_struct.types[index].toType(), + else => unreachable, }; } pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), - else => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.layout != .Packed); + return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - assert(struct_obj.layout != .Packed); - return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.fields.values()[index].normalAlignment(mod); - }, - else => unreachable, + .anon_struct_type => |anon_struct| { + return anon_struct.types[index].toType().abiAlignment(mod); }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.fields.values()[index].normalAlignment(mod); + }, + else => unreachable, } } pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - return tuple.values[index]; - }, - .anon_struct => { - const struct_obj = ty.castTag(.anon_struct).?.data; - return struct_obj.values[index]; - }, - else => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.fields.values()[index].default_val; }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - return struct_obj.fields.values()[index].default_val; - }, - else => unreachable, + .anon_struct_type => |anon_struct| { + const val = anon_struct.values[index]; + // TODO: avoid using `unreachable` to indicate this. 
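+                // Here .none means the field has no comptime-known value, i.e.
+                // it is runtime-known; unreachable_value was the legacy encoding
+                // for the same thing.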
+ if (val == .none) return Value.@"unreachable"; + return val.toValue(); }, + else => unreachable, } } pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - const val = tuple.values[index]; - if (val.ip_index == .unreachable_value) { - return tuple.types[index].onePossibleValue(mod); - } else { - return val; - } - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - const val = anon_struct.values[index]; - if (val.ip_index == .unreachable_value) { - return anon_struct.types[index].onePossibleValue(mod); - } else { - return val; - } - }, - else => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field = struct_obj.fields.values()[index]; + if (field.is_comptime) { + return field.default_val; + } else { + return field.ty.onePossibleValue(mod); + } }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const field = struct_obj.fields.values()[index]; - if (field.is_comptime) { - return field.default_val; - } else { - return field.ty.onePossibleValue(mod); - } - }, - else => unreachable, + .anon_struct_type => |tuple| { + const val = tuple.values[index]; + if (val == .none) { + return tuple.types[index].toType().onePossibleValue(mod); + } else { + return val.toValue(); + } }, + else => unreachable, } } pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - const val = tuple.values[index]; - return val.ip_index != .unreachable_value; - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - const val = anon_struct.values[index]; - return val.ip_index != .unreachable_value; - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - if (struct_obj.layout == .Packed) return false; - const field = struct_obj.fields.values()[index]; - return field.is_comptime; - }, - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + if (struct_obj.layout == .Packed) return false; + const field = struct_obj.fields.values()[index]; + return field.is_comptime; }, - } + .anon_struct_type => |anon_struct| anon_struct.values[index] != .none, + else => unreachable, + }; } pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 { @@ -4516,46 +4253,43 @@ pub const Type = struct { pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { switch (ty.ip_index) { .none => switch (ty.tag()) { - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveLayout()); + assert(struct_obj.layout != .Packed); + var it = ty.iterateStructOffsets(mod); + while (it.next()) |field_offset| { + if (index == field_offset.field) + return field_offset.offset; + } + return 
std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); + }, + + .anon_struct_type => |tuple| { var offset: u64 = 0; var big_align: u32 = 0; - for (tuple.types, 0..) |field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) { // comptime field if (i == index) return offset; continue; } - const field_align = field_ty.abiAlignment(mod); + const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); offset = std.mem.alignForwardGeneric(u64, offset, field_align); if (i == index) return offset; - offset += field_ty.abiSize(mod); + offset += field_ty.toType().abiSize(mod); } offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); return offset; }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - assert(struct_obj.haveLayout()); - assert(struct_obj.layout != .Packed); - var it = ty.iterateStructOffsets(mod); - while (it.next()) |field_offset| { - if (index == field_offset.field) - return field_offset.offset; - } - - return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); - }, - .union_type => |union_type| { if (!union_type.hasTag()) return 0; @@ -4655,10 +4389,6 @@ pub const Type = struct { inferred_alloc_const, // See last_no_payload_tag below. // After this, the tag requires a payload. - /// Possible Value tags for this: @"struct" - tuple, - /// Possible Value tags for this: @"struct" - anon_struct, pointer, function, optional, @@ -4691,8 +4421,6 @@ pub const Type = struct { .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, - .tuple => Payload.Tuple, - .anon_struct => Payload.AnonStruct, }; } @@ -4723,83 +4451,48 @@ pub const Type = struct { pub fn isTuple(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => true, - else => false, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; return struct_obj.is_tuple; }, + .anon_struct_type => |anon_struct| anon_struct.names.len == 0, else => false, }, }; } - pub fn isAnonStruct(ty: Type) bool { - return switch (ty.ip_index) { - .empty_struct_type => true, - .none => switch (ty.tag()) { - .anon_struct => true, - else => false, - }, + pub fn isAnonStruct(ty: Type, mod: *Module) bool { + if (ty.ip_index == .empty_struct_type) return true; + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, else => false, }; } pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { - .empty_struct_type => true, - .none => switch (ty.tag()) { - .tuple, .anon_struct => true, - else => false, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; - return struct_obj.is_tuple; - }, - else => false, - }, - }; - } - - pub fn isSimpleTuple(ty: Type) bool { - return switch (ty.ip_index) { - 
.empty_struct_type => true, - .none => switch (ty.tag()) { - .tuple => true, - else => false, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + return struct_obj.is_tuple; }, + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, else => false, }; } - pub fn isSimpleTupleOrAnonStruct(ty: Type) bool { - return switch (ty.ip_index) { - .empty_struct_type => true, - .none => switch (ty.tag()) { - .tuple, .anon_struct => true, - else => false, - }, + pub fn isSimpleTuple(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, else => false, }; } - // Only allowed for simple tuple types - pub fn tupleFields(ty: Type) Payload.Tuple.Data { - return switch (ty.ip_index) { - .empty_struct_type => .{ .types = &.{}, .values = &.{} }, - .none => switch (ty.tag()) { - .tuple => ty.castTag(.tuple).?.data, - .anon_struct => .{ - .types = ty.castTag(.anon_struct).?.data.types, - .values = ty.castTag(.anon_struct).?.data.values, - }, - else => unreachable, - }, - else => unreachable, + pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => true, + else => false, }; } @@ -4947,29 +4640,6 @@ pub const Type = struct { /// memory is owned by `Module` data: []const u8, }; - - pub const Tuple = struct { - base: Payload = .{ .tag = .tuple }, - data: Data, - - pub const Data = struct { - types: []Type, - /// unreachable_value elements are used to indicate runtime-known. - values: []Value, - }; - }; - - pub const AnonStruct = struct { - base: Payload = .{ .tag = .anon_struct }, - data: Data, - - pub const Data = struct { - names: []const []const u8, - types: []Type, - /// unreachable_value elements are used to indicate runtime-known. - values: []Value, - }; - }; }; pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; diff --git a/src/value.zig b/src/value.zig index 84408424f048..50e3fc80610d 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1889,26 +1889,28 @@ pub const Value = struct { const b_field_vals = b.castTag(.aggregate).?.data; assert(a_field_vals.len == b_field_vals.len); - if (ty.isSimpleTupleOrAnonStruct()) { - const types = ty.tupleFields().types; - assert(types.len == a_field_vals.len); - for (types, 0..) |field_ty, i| { - if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, opt_sema))) { - return false; + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |anon_struct| { + assert(anon_struct.types.len == a_field_vals.len); + for (anon_struct.types, 0..) |field_ty, i| { + if (!(try eqlAdvanced(a_field_vals[i], field_ty.toType(), b_field_vals[i], field_ty.toType(), mod, opt_sema))) { + return false; + } } - } - return true; - } - - if (ty.zigTypeTag(mod) == .Struct) { - const fields = ty.structFields(mod).values(); - assert(fields.len == a_field_vals.len); - for (fields, 0..) |field, i| { - if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) { - return false; + return true; + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const fields = struct_obj.fields.values(); + assert(fields.len == a_field_vals.len); + for (fields, 0..) 
|field, i| {
+                        if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
+                            return false;
+                        }
                     }
-                }
-                return true;
+                    return true;
+                },
+                else => {},
             }

             const elem_ty = ty.childType(mod);
@@ -2017,20 +2019,6 @@ pub const Value = struct {
             if ((try ty.onePossibleValue(mod)) != null) {
                 return true;
             }
-            if (a_ty.castTag(.anon_struct)) |payload| {
-                const tuple = payload.data;
-                if (tuple.values.len != 1) {
-                    return false;
-                }
-                const field_name = tuple.names[0];
-                const union_obj = mod.typeToUnion(ty).?;
-                const field_index = @intCast(u32, union_obj.fields.getIndex(field_name) orelse return false);
-                const tag_and_val = b.castTag(.@"union").?.data;
-                const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, field_index);
-                const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod);
-                if (!tag_matches) return false;
-                return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, opt_sema);
-            }
             return false;
         },
         .Float => {

From 6a9a918fbe4adc23dd7d7573c6f1e499f4be074e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 14 May 2023 20:37:22 -0700
Subject: [PATCH 067/205] stage2: encode one-possible-value tuple specially

Anonymous structs and anonymous tuples can be stored via an
`only_possible_value` tag because their type encodings, by definition,
will have every value specified, which can be used to populate the
fields slice in `Key.Aggregate`.

Also fix `isTupleOrAnonStruct`.
---
 src/InternPool.zig | 44 +++++++++++++++++++++++++++++++++-----------
 src/Sema.zig       | 27 +++++++++------------------
 src/type.zig       | 27 ++++++++++-----------------
 3 files changed, 52 insertions(+), 46 deletions(-)

diff --git a/src/InternPool.zig b/src/InternPool.zig
index 74cc452176f8..2435e0ad3143 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -1166,10 +1166,10 @@ pub const Tag = enum(u8) {
     /// Module.Struct object allocated for it.
     /// data is Module.Namespace.Index.
     type_struct_ns,
-    /// An AnonStructType which stores types, names, and values for each field.
+    /// An AnonStructType which stores types, names, and values for fields.
     /// data is extra index of `TypeStructAnon`.
     type_struct_anon,
-    /// An AnonStructType which has only types and values for each field.
+    /// An AnonStructType which has only types and values for fields.
     /// data is extra index of `TypeStructAnon`.
     type_tuple_anon,
     /// A tagged union type.
@@ -1272,7 +1272,8 @@ pub const Tag = enum(u8) {
    /// only one possible value. Not all only-possible-values are encoded this way;
    /// for example structs which have all comptime fields are not encoded this way.
    /// The set of values that are encoded this way is:
-   /// * A struct which has 0 fields.
+   /// * An array or vector which has length 0.
+   /// * A struct which has all fields comptime-known.
    /// data is Index of the type, which is known to be zero bits at runtime.
    only_possible_value,
    /// data is extra index to Key.Union.
@@ -1863,10 +1864,21 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         .only_possible_value => {
             const ty = @intToEnum(Index, data);
             return switch (ip.indexToKey(ty)) {
+                // TODO: migrate structs to properly use the InternPool rather
+                // than using the SegmentedList trick, then the struct type will
+                // have a slice of comptime values that can be used here when the
+                // struct has one possible value due to all fields being comptime
+                // (same as the tuple case below).
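+                // Until that migration happens, a struct type that reaches here
+                // was interned with zero field values (see the aggregate case in
+                // `get`), so an empty fields slice fully describes the value.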
.struct_type => .{ .aggregate = .{ .ty = ty, .fields = &.{}, } }, + // There is only one possible value precisely due to the + // fact that this values slice is fully populated! + .anon_struct_type => |anon_struct_type| .{ .aggregate = .{ + .ty = ty, + .fields = anon_struct_type.values, + } }, else => unreachable, }; }, @@ -2392,12 +2404,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .aggregate => |aggregate| { assert(aggregate.ty != .none); for (aggregate.fields) |elem| assert(elem != .none); - if (aggregate.fields.len != ip.aggregateTypeLen(aggregate.ty)) { - std.debug.print("aggregate fields len = {d}, type len = {d}\n", .{ - aggregate.fields.len, - ip.aggregateTypeLen(aggregate.ty), - }); - } assert(aggregate.fields.len == ip.aggregateTypeLen(aggregate.ty)); if (aggregate.fields.len == 0) { @@ -2408,6 +2414,22 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } + switch (ip.indexToKey(aggregate.ty)) { + .anon_struct_type => |anon_struct_type| { + if (std.mem.eql(Index, anon_struct_type.values, aggregate.fields)) { + // This encoding works thanks to the fact that, as we just verified, + // the type itself contains a slice of values that can be provided + // in the aggregate fields. + ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(aggregate.ty), + }); + return @intToEnum(Index, ip.items.len - 1); + } + }, + else => {}, + } + try ip.extra.ensureUnusedCapacity( gpa, @typeInfo(Aggregate).Struct.fields.len + aggregate.fields.len, @@ -3121,8 +3143,8 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { } }; counts.sort(SortContext{ .map = &counts }); - const len = @min(50, counts.count()); - std.debug.print(" top 50 tags:\n", .{}); + const len = @min(25, counts.count()); + std.debug.print(" top 25 tags:\n", .{}); for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| { std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ @tagName(tag), stats.count, stats.bytes, diff --git a/src/Sema.zig b/src/Sema.zig index 31e07bdcdca0..74b3cdd11486 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -18237,6 +18237,7 @@ fn zirStructInitAnon( return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(init)) |init_val| { + assert(init_val.ip_index != .none); values[i] = init_val.ip_index; } else { values[i] = .none; @@ -33181,8 +33182,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // TODO: this is incorrect for structs with comptime fields, I think // we should use a temporary allocator to construct an aggregate that // is populated with the comptime values and then intern that value here. - // This TODO is repeated for anon_struct_type below, as well as - // in the redundant implementation of one-possible-value in type.zig. + // This TODO is repeated in the redundant implementation of + // one-possible-value in type.zig. 
const empty = try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, .fields = &.{}, @@ -33191,25 +33192,15 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .anon_struct_type => |tuple| { - for (tuple.types, tuple.values) |field_ty, val| { - const is_comptime = val != .none; - if (is_comptime) continue; - if ((try sema.typeHasOnePossibleValue(field_ty.toType())) != null) continue; - return null; + for (tuple.values) |val| { + if (val == .none) return null; } - // In this case the struct has no runtime-known fields and + // In this case the struct has all comptime-known fields and // therefore has one possible value. - - // TODO: this is incorrect for structs with comptime fields, I think - // we should use a temporary allocator to construct an aggregate that - // is populated with the comptime values and then intern that value here. - // This TODO is repeated for struct_type above, as well as - // in the redundant implementation of one-possible-value in type.zig. - const empty = try mod.intern(.{ .aggregate = .{ + return (try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, - .fields = &.{}, - } }); - return empty.toValue(); + .fields = tuple.values, + } })).toValue(); }, .union_type => |union_type| { diff --git a/src/type.zig b/src/type.zig index ee9e7c8e1700..32fa64a1ace9 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3583,8 +3583,8 @@ pub const Type = struct { // TODO: this is incorrect for structs with comptime fields, I think // we should use a temporary allocator to construct an aggregate that // is populated with the comptime values and then intern that value here. - // This TODO is repeated for anon_struct_type below, as well as in - // the redundant implementation of one-possible-value logic in Sema.zig. + // This TODO is repeated in the redundant implementation of + // one-possible-value logic in Sema.zig. const empty = try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, .fields = &.{}, @@ -3593,22 +3593,15 @@ pub const Type = struct { }, .anon_struct_type => |tuple| { - for (tuple.types, tuple.values) |field_ty, val| { - if (val != .none) continue; // comptime field - if ((try field_ty.toType().onePossibleValue(mod)) != null) continue; - return null; + for (tuple.values) |val| { + if (val == .none) return null; } - - // TODO: this is incorrect for structs with comptime fields, I think - // we should use a temporary allocator to construct an aggregate that - // is populated with the comptime values and then intern that value here. - // This TODO is repeated for struct_type above, as well as in - // the redundant implementation of one-possible-value logic in Sema.zig. - const empty = try mod.intern(.{ .aggregate = .{ + // In this case the struct has all comptime-known fields and + // therefore has one possible value. 
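+                // `tuple.values` is exactly the slice that the aggregate case in
+                // `InternPool.get` compares against, so this intern call takes the
+                // cheap `only_possible_value` encoding introduced in this commit.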
+ return (try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, - .fields = &.{}, - } }); - return empty.toValue(); + .fields = tuple.values, + } })).toValue(); }, .union_type => |union_type| { @@ -4477,7 +4470,7 @@ pub const Type = struct { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; return struct_obj.is_tuple; }, - .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, + .anon_struct_type => true, else => false, }; } From 17882162b3be5542b4e289e5ddc6535a4bb4c6b1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 15 May 2023 20:09:54 -0700 Subject: [PATCH 068/205] stage2: move function types to InternPool --- lib/std/builtin.zig | 2 +- src/Air.zig | 7 +- src/InternPool.zig | 244 ++++++++++++++++++---- src/Module.zig | 38 +++- src/Sema.zig | 334 +++++++++++++++-------------- src/Zir.zig | 5 +- src/arch/aarch64/CodeGen.zig | 44 ++-- src/arch/arm/CodeGen.zig | 44 ++-- src/arch/riscv64/CodeGen.zig | 22 +- src/arch/sparc64/CodeGen.zig | 22 +- src/arch/wasm/CodeGen.zig | 92 ++++---- src/arch/x86_64/CodeGen.zig | 34 +-- src/codegen.zig | 2 +- src/codegen/c.zig | 13 +- src/codegen/c/type.zig | 34 +-- src/codegen/llvm.zig | 235 +++++++++++---------- src/codegen/spirv.zig | 22 +- src/link/Coff.zig | 2 +- src/link/Dwarf.zig | 2 +- src/link/SpirV.zig | 6 +- src/target.zig | 11 + src/type.zig | 392 ++++++++++------------------------- src/value.zig | 5 + 23 files changed, 821 insertions(+), 791 deletions(-) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 429654bd4a6b..3e8970a354e7 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -143,7 +143,7 @@ pub const Mode = OptimizeMode; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. -pub const CallingConvention = enum { +pub const CallingConvention = enum(u8) { /// This is the default Zig calling convention used when not using `export` on `fn` /// and no other calling convention is specified. 
Unspecified, diff --git a/src/Air.zig b/src/Air.zig index e82a70100f5a..09f8d6c9e2e2 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -845,7 +845,6 @@ pub const Inst = struct { pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), - u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -914,8 +913,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), - one_u5 = @enumToInt(InternPool.Index.one_u5), - four_u5 = @enumToInt(InternPool.Index.four_u5), + one_u8 = @enumToInt(InternPool.Index.one_u8), + four_u8 = @enumToInt(InternPool.Index.four_u8), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), @@ -1383,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); - return callee_ty.fnReturnType(); + return callee_ty.fnReturnTypeIp(ip); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { diff --git a/src/InternPool.zig b/src/InternPool.zig index 2435e0ad3143..d4bfe5a24431 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -148,6 +148,7 @@ pub const Key = union(enum) { union_type: UnionType, opaque_type: OpaqueType, enum_type: EnumType, + func_type: FuncType, /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented /// via `simple_value` and has a named `Index` tag for it. @@ -185,6 +186,13 @@ pub const Key = union(enum) { /// If zero use pointee_type.abiAlignment() /// When creating pointer types, if alignment is equal to pointee type /// abi alignment, this value should be set to 0 instead. + /// + /// Please don't change this to u32 or u29. If you want to save bits, + /// migrate the rest of the codebase to use the `Alignment` type rather + /// than using byte units. The LLVM backend can only handle `c_uint` + /// byte units; we can emit a semantic analysis error if alignment that + /// overflows that amount is attempted to be used, but it shouldn't + /// affect the other backends. alignment: u64 = 0, /// If this is non-zero it means the pointer points to a sub-byte /// range of data, which is backed by a "host integer" with this @@ -358,6 +366,44 @@ pub const Key = union(enum) { } }; + pub const FuncType = struct { + param_types: []Index, + return_type: Index, + /// Tells whether a parameter is comptime. See `paramIsComptime` helper + /// method for accessing this. + comptime_bits: u32, + /// Tells whether a parameter is noalias. See `paramIsNoalias` helper + /// method for accessing this. + noalias_bits: u32, + /// If zero use default target function code alignment. + /// + /// Please don't change this to u32 or u29. If you want to save bits, + /// migrate the rest of the codebase to use the `Alignment` type rather + /// than using byte units. The LLVM backend can only handle `c_uint` + /// byte units; we can emit a semantic analysis error if alignment that + /// overflows that amount is attempted to be used, but it shouldn't + /// affect the other backends. 
+ alignment: u64, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + + pub fn paramIsComptime(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.comptime_bits >> i) != 0; + } + + pub fn paramIsNoalias(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.noalias_bits >> i) != 0; + } + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -512,6 +558,18 @@ pub const Key = union(enum) { for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem); for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem); }, + + .func_type => |func_type| { + for (func_type.param_types) |param_type| std.hash.autoHash(hasher, param_type); + std.hash.autoHash(hasher, func_type.return_type); + std.hash.autoHash(hasher, func_type.comptime_bits); + std.hash.autoHash(hasher, func_type.noalias_bits); + std.hash.autoHash(hasher, func_type.alignment); + std.hash.autoHash(hasher, func_type.cc); + std.hash.autoHash(hasher, func_type.is_var_args); + std.hash.autoHash(hasher, func_type.is_generic); + std.hash.autoHash(hasher, func_type.is_noinline); + }, } } @@ -670,6 +728,20 @@ pub const Key = union(enum) { std.mem.eql(Index, a_info.values, b_info.values) and std.mem.eql(NullTerminatedString, a_info.names, b_info.names); }, + + .func_type => |a_info| { + const b_info = b.func_type; + + return std.mem.eql(Index, a_info.param_types, b_info.param_types) and + a_info.return_type == b_info.return_type and + a_info.comptime_bits == b_info.comptime_bits and + a_info.noalias_bits == b_info.noalias_bits and + a_info.alignment == b_info.alignment and + a_info.cc == b_info.cc and + a_info.is_var_args == b_info.is_var_args and + a_info.is_generic == b_info.is_generic and + a_info.is_noinline == b_info.is_noinline; + }, } } @@ -687,6 +759,7 @@ pub const Key = union(enum) { .opaque_type, .enum_type, .anon_struct_type, + .func_type, => .type_type, inline .ptr, @@ -734,7 +807,6 @@ pub const Index = enum(u32) { pub const last_value: Index = .empty_struct; u1_type, - u5_type, u8_type, i8_type, u16_type, @@ -811,10 +883,10 @@ pub const Index = enum(u32) { one, /// `1` (usize) one_usize, - /// `1` (u5) - one_u5, - /// `4` (u5) - four_u5, + /// `1` (u8) + one_u8, + /// `4` (u8) + four_u8, /// `-1` (comptime_int) negative_one, /// `std.builtin.CallingConvention.C` @@ -880,12 +952,6 @@ pub const static_keys = [_]Key{ .bits = 1, } }, - // u5_type - .{ .int_type = .{ - .signedness = .unsigned, - .bits = 5, - } }, - .{ .int_type = .{ .signedness = .unsigned, .bits = 8, @@ -1074,14 +1140,14 @@ pub const static_keys = [_]Key{ .storage = .{ .u64 = 1 }, } }, - // one_u5 + // one_u8 .{ .int = .{ - .ty = .u5_type, + .ty = .u8_type, .storage = .{ .u64 = 1 }, } }, - // four_u5 + // four_u8 .{ .int = .{ - .ty = .u5_type, + .ty = .u8_type, .storage = .{ .u64 = 4 }, } }, // negative_one @@ -1092,12 +1158,12 @@ pub const static_keys = [_]Key{ // calling_convention_c .{ .enum_tag = .{ .ty = .calling_convention_type, - .int = .one_u5, + .int = .one_u8, } }, // calling_convention_inline .{ .enum_tag = .{ .ty = .calling_convention_type, - .int = .four_u5, + .int = .four_u8, } }, .{ .simple_value = .void }, @@ -1181,6 +1247,9 @@ pub const Tag = enum(u8) { /// An untagged union type which has a safety tag. /// `data` is `Module.Union.Index`. type_union_safety, + /// A function body type. 
+ /// `data` is extra index to `TypeFunction`. + type_function, /// Typed `undefined`. /// `data` is `Index` of the type. @@ -1283,6 +1352,29 @@ pub const Tag = enum(u8) { aggregate, }; +/// Trailing: +/// 0. param_type: Index for each params_len +pub const TypeFunction = struct { + params_len: u32, + return_type: Index, + comptime_bits: u32, + noalias_bits: u32, + flags: Flags, + + pub const Flags = packed struct(u32) { + alignment: Alignment, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + _: u11 = 0, + }; +}; + /// Trailing: /// 0. element: Index for each len /// len is determined by the aggregate type. @@ -1371,24 +1463,6 @@ pub const Pointer = struct { flags: Flags, packed_offset: PackedOffset, - /// Stored as a power-of-two, with one special value to indicate none. - pub const Alignment = enum(u6) { - none = std.math.maxInt(u6), - _, - - pub fn toByteUnits(a: Alignment, default: u64) u64 { - return switch (a) { - .none => default, - _ => @as(u64, 1) << @enumToInt(a), - }; - } - - pub fn fromByteUnits(n: u64) Alignment { - if (n == 0) return .none; - return @intToEnum(Alignment, @ctz(n)); - } - }; - pub const Flags = packed struct(u32) { size: Size, alignment: Alignment, @@ -1409,6 +1483,24 @@ pub const Pointer = struct { pub const VectorIndex = Key.PtrType.VectorIndex; }; +/// Stored as a power-of-two, with one special value to indicate none. +pub const Alignment = enum(u6) { + none = std.math.maxInt(u6), + _, + + pub fn toByteUnits(a: Alignment, default: u64) u64 { + return switch (a) { + .none => default, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn fromByteUnits(n: u64) Alignment { + if (n == 0) return .none; + return @intToEnum(Alignment, @ctz(n)); + } +}; + /// Used for non-sentineled arrays that have length fitting in u32, as well as /// vectors. pub const Vector = struct { @@ -1765,6 +1857,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }, .type_enum_explicit => indexToKeyEnum(ip, data, .explicit), .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive), + .type_function => .{ .func_type = indexToKeyFuncType(ip, data) }, .undef => .{ .undef = @intToEnum(Index, data) }, .opt_null => .{ .opt = .{ @@ -1896,6 +1989,29 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }; } +fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { + const type_function = ip.extraDataTrail(TypeFunction, data); + const param_types = @ptrCast( + []Index, + ip.extra.items[type_function.end..][0..type_function.data.params_len], + ); + return .{ + .param_types = param_types, + .return_type = type_function.data.return_type, + .comptime_bits = type_function.data.comptime_bits, + .noalias_bits = type_function.data.noalias_bits, + .alignment = type_function.data.flags.alignment.toByteUnits(0), + .cc = type_function.data.flags.cc, + .is_var_args = type_function.data.flags.is_var_args, + .is_generic = type_function.data.flags.is_generic, + .is_noinline = type_function.data.flags.is_noinline, + .align_is_generic = type_function.data.flags.align_is_generic, + .cc_is_generic = type_function.data.flags.cc_is_generic, + .section_is_generic = type_function.data.flags.section_is_generic, + .addrspace_is_generic = type_function.data.flags.addrspace_is_generic, + }; +} + /// Asserts the integer tag type is already present in the InternPool. 
fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index { return ip.getAssumeExists(.{ .int_type = .{ @@ -1977,7 +2093,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .child = ptr_type.elem_type, .sentinel = ptr_type.sentinel, .flags = .{ - .alignment = Pointer.Alignment.fromByteUnits(ptr_type.alignment), + .alignment = Alignment.fromByteUnits(ptr_type.alignment), .is_const = ptr_type.is_const, .is_volatile = ptr_type.is_volatile, .is_allowzero = ptr_type.is_allowzero, @@ -2163,6 +2279,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, + .func_type => |func_type| { + assert(func_type.return_type != .none); + for (func_type.param_types) |param_type| assert(param_type != .none); + + const params_len = @intCast(u32, func_type.param_types.len); + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len + + params_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = ip.addExtraAssumeCapacity(TypeFunction{ + .params_len = params_len, + .return_type = func_type.return_type, + .comptime_bits = func_type.comptime_bits, + .noalias_bits = func_type.noalias_bits, + .flags = .{ + .alignment = Alignment.fromByteUnits(func_type.alignment), + .cc = func_type.cc, + .is_var_args = func_type.is_var_args, + .is_generic = func_type.is_generic, + .is_noinline = func_type.is_noinline, + .align_is_generic = func_type.align_is_generic, + .cc_is_generic = func_type.cc_is_generic, + .section_is_generic = func_type.section_is_generic, + .addrspace_is_generic = func_type.addrspace_is_generic, + }, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); + }, + .extern_func => @panic("TODO"), .ptr => |ptr| switch (ptr.addr) { @@ -2736,6 +2883,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { OptionalMapIndex => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), + TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), @@ -2797,6 +2945,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), + TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), @@ -2988,17 +3137,17 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } } -pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex { +pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex { + assert(val != .none); const tags = ip.items.items(.tag); - if (val == .none) return .none; if (tags[@enumToInt(val)] != .type_struct) return .none; const datas = ip.items.items(.data); return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex { +pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { + assert(val != .none); const tags = 
ip.items.items(.tag); - if (val == .none) return .none; switch (tags[@enumToInt(val)]) { .type_union_tagged, .type_union_untagged, .type_union_safety => {}, else => return .none, @@ -3007,6 +3156,16 @@ pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex { return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); } +pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { + assert(val != .none); + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + switch (tags[@enumToInt(val)]) { + .type_function => return indexToKeyFuncType(ip, datas[@enumToInt(val)]), + else => return null, + } +} + pub fn isOptionalType(ip: InternPool, ty: Index) bool { const tags = ip.items.items(.tag); if (ty == .none) return false; @@ -3092,6 +3251,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_union_safety, => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + .type_function => b: { + const info = ip.extraData(TypeFunction, data); + break :b @sizeOf(TypeFunction) + (@sizeOf(u32) * info.params_len); + }, + .undef => 0, .simple_type => 0, .simple_value => 0, diff --git a/src/Module.zig b/src/Module.zig index cf1fea3444c9..c8e676f813b5 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -846,7 +846,7 @@ pub const Decl = struct { pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; const ty = (decl.val.castTag(.ty) orelse return .none).data; - return mod.intern_pool.indexToStruct(ty.ip_index); + return mod.intern_pool.indexToStructType(ty.ip_index); } /// If the Decl has a value and it is a union, return it, @@ -4764,7 +4764,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.analysis = .complete; decl.generation = mod.generation; - const is_inline = decl.ty.fnCallingConvention() == .Inline; + const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; if (is_inline) { @@ -5617,6 +5617,9 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); + const fn_ty = decl.ty; + const fn_ty_info = mod.typeToFunc(fn_ty).?; + var sema: Sema = .{ .mod = mod, .gpa = gpa, @@ -5626,7 +5629,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { .owner_decl = decl, .owner_decl_index = decl_index, .func = func, - .fn_ret_ty = decl.ty.fnReturnType(), + .fn_ret_ty = fn_ty_info.return_type.toType(), .owner_func = func, .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), }; @@ -5664,8 +5667,6 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. 
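     // E.g. for an instantiation of `fn f(comptime T: type, x: T)` with
     // `T = u32`, only `x` receives a runtime `arg` instruction, typed `u32`.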
- const fn_ty = decl.ty; - const fn_ty_info = fn_ty.fnInfo(); const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` @@ -5692,7 +5693,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { sema.inst_map.putAssumeCapacityNoClobber(inst, arg); total_param_index += 1; continue; - } else fn_ty_info.param_types[runtime_param_index]; + } else fn_ty_info.param_types[runtime_param_index].toType(); const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, @@ -6864,6 +6865,10 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } +pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type { + return (try intern(mod, .{ .func_type = info })).toType(); +} + /// Supports optionals in addition to pointers. pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { @@ -6996,6 +7001,16 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { return i.toValue(); } +pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { + const ip = &mod.intern_pool; + assert(ip.isOptionalType(opt_ty.ip_index)); + const result = try ip.get(mod.gpa, .{ .opt = .{ + .ty = opt_ty.ip_index, + .val = .none, + } }); + return result.toValue(); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } @@ -7201,15 +7216,22 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I /// * A struct which has no fields (`struct {}`). /// * Not a struct. 
pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { - const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null; + if (ty.ip_index == .none) return null; + const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null; return mod.structPtr(struct_index); } pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { - const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null; + if (ty.ip_index == .none) return null; + const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null; return mod.unionPtr(union_index); } +pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { + if (ty.ip_index == .none) return null; + return mod.intern_pool.indexToFuncType(ty.ip_index); +} + pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(owner_decl_index); diff --git a/src/Sema.zig b/src/Sema.zig index 74b3cdd11486..eb8dc5a633f2 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5850,6 +5850,7 @@ pub fn analyzeExport( } fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const src = LazySrcLoc.nodeOffset(extra.node); @@ -5862,8 +5863,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst const func = sema.func orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); - switch (fn_owner_decl.ty.fnCallingConvention()) { + const fn_owner_decl = mod.declPtr(func.owner_decl); + switch (fn_owner_decl.ty.fnCallingConvention(mod)) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), else => if (block.inlining != null) { @@ -5871,7 +5872,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst }, } - const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func); + const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); @@ -6378,7 +6379,7 @@ fn zirCall( var input_is_error = false; const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const parent_comptime = block.is_comptime; // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument. @@ -6393,7 +6394,7 @@ fn zirCall( // Generate args to comptime params in comptime block. 
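         // With the InternPool representation this is a bitset probe:
         // `paramIsComptime(i)` tests bit `i` of the packed `comptime_bits`.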
defer block.is_comptime = parent_comptime; - if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) { + if (arg_index < fn_params_len and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { block.is_comptime = true; // TODO set comptime_reason } @@ -6402,10 +6403,10 @@ fn zirCall( if (arg_index >= fn_params_len) break :inst Air.Inst.Ref.var_args_param_type; - if (func_ty_info.param_types[arg_index].isGenericPoison()) + if (func_ty_info.param_types[arg_index] == .generic_poison_type) break :inst Air.Inst.Ref.generic_poison_type; - break :inst try sema.addType(func_ty_info.param_types[arg_index]); + break :inst try sema.addType(func_ty_info.param_types[arg_index].toType()); }); const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); @@ -6506,7 +6507,7 @@ fn checkCallArgumentCount( return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const args_len = total_args - @boolToInt(member_fn); if (func_ty_info.is_var_args) { @@ -6562,7 +6563,7 @@ fn callBuiltin( std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(sema.mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) { std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len }); @@ -6573,7 +6574,7 @@ fn callBuiltin( const GenericCallAdapter = struct { generic_fn: *Module.Fn, precomputed_hash: u64, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, args: []const Arg, module: *Module, @@ -6656,7 +6657,7 @@ fn analyzeCall( const mod = sema.mod; const callee_ty = sema.typeOf(func); - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const cc = func_ty_info.cc; if (cc == .Naked) { @@ -6704,7 +6705,7 @@ fn analyzeCall( var comptime_reason_buf: Block.ComptimeReason = undefined; var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_comptime_call) { - if (sema.typeRequiresComptime(func_ty_info.return_type)) |ct| { + if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| { is_comptime_call = ct; if (ct) { // stage1 can't handle doing this directly @@ -6712,7 +6713,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; } @@ -6750,7 +6751,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; }, @@ -6875,9 +6876,9 @@ fn analyzeCall( // comptime state. 
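         // Memoization is only sound if re-running the call would reproduce
         // the same result, so any argument that can mutate comptime state
         // disables it below.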
var should_memoize = true; - var new_fn_info = fn_owner_decl.ty.fnInfo(); - new_fn_info.param_types = try sema.arena.alloc(Type, new_fn_info.param_types.len); - new_fn_info.comptime_params = (try sema.arena.alloc(bool, new_fn_info.param_types.len)).ptr; + var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?; + new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len); + new_fn_info.comptime_bits = 0; // This will have return instructions analyzed as break instructions to // the block_inst above. Here we are performing "comptime/inline semantic analysis" @@ -6970,7 +6971,7 @@ fn analyzeCall( } break :blk bare_return_type; }; - new_fn_info.return_type = fn_ret_ty; + new_fn_info.return_type = fn_ret_ty.ip_index; const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; @@ -6993,7 +6994,7 @@ fn analyzeCall( } } - const new_func_resolved_ty = try Type.Tag.function.create(sema.arena, new_fn_info); + const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); @@ -7081,13 +7082,14 @@ fn analyzeCall( assert(!func_ty_info.is_generic); const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len); + const fn_info = mod.typeToFunc(func_ty).?; for (uncasted_args, 0..) |uncasted_arg, i| { if (i < fn_params_len) { const opts: CoerceOpts = .{ .param_src = .{ .func_inst = func, .param_i = @intCast(u32, i), } }; - const param_ty = func_ty.fnParamType(i); + const param_ty = fn_info.param_types[i].toType(); args[i] = sema.analyzeCallArg( block, .unneeded, @@ -7126,8 +7128,8 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - try sema.queueFullTypeResolution(func_ty_info.return_type); - if (sema.owner_func != null and func_ty_info.return_type.isError(mod)) { + try sema.queueFullTypeResolution(func_ty_info.return_type.toType()); + if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7155,7 +7157,7 @@ fn analyzeCall( try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src); } return sema.handleTailCall(block, call_src, func_ty, func_inst); - } else if (block.wantSafety() and func_ty_info.return_type.isNoReturn()) { + } else if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) { // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
check: { @@ -7171,7 +7173,7 @@ fn analyzeCall( try sema.safetyPanic(block, .noreturn_returned); return Air.Inst.Ref.unreachable_value; - } else if (func_ty_info.return_type.isNoReturn()) { + } else if (func_ty_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7208,13 +7210,13 @@ fn analyzeInlineCallArg( param_block: *Block, arg_src: LazySrcLoc, inst: Zir.Inst.Index, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: InternPool.Key.FuncType, arg_i: *usize, uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, should_memoize: *bool, memoized_call_key: Module.MemoizedCall.Key, - raw_param_types: []const Type, + raw_param_types: []const InternPool.Index, func_inst: Air.Inst.Ref, has_comptime_args: *bool, ) !void { @@ -7233,13 +7235,14 @@ fn analyzeInlineCallArg( const param_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const param_ty = param_ty: { const raw_param_ty = raw_param_types[arg_i.*]; - if (!raw_param_ty.isGenericPoison()) break :param_ty raw_param_ty; + if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty; const param_ty_inst = try sema.resolveBody(param_block, param_body, inst); - break :param_ty try sema.analyzeAsType(param_block, param_src, param_ty_inst); + const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst); + break :param_ty param_ty.toIntern(); }; new_fn_info.param_types[arg_i.*] = param_ty; const uncasted_arg = uncasted_args[arg_i.*]; - if (try sema.typeRequiresComptime(param_ty)) { + if (try sema.typeRequiresComptime(param_ty.toType())) { _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| { if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; @@ -7247,7 +7250,7 @@ fn analyzeInlineCallArg( } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); } - const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{ + const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ .func_inst = func_inst, .param_i = @intCast(u32, arg_i.*), } }) catch |err| switch (err) { @@ -7276,7 +7279,7 @@ fn analyzeInlineCallArg( } should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); memoized_call_key.args[arg_i.*] = .{ - .ty = param_ty, + .ty = param_ty.toType(), .val = arg_val, }; } else { @@ -7292,7 +7295,7 @@ fn analyzeInlineCallArg( .param_anytype, .param_anytype_comptime => { // No coercion needed. 
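             // An `anytype` parameter adopts the argument's type verbatim, so
             // the uncasted argument is recorded as-is.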
const uncasted_arg = uncasted_args[arg_i.*]; - new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg); + new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern(); if (is_comptime_call) { sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); @@ -7357,7 +7360,7 @@ fn analyzeGenericCallArg( uncasted_arg: Air.Inst.Ref, comptime_arg: TypedValue, runtime_args: []Air.Inst.Ref, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: InternPool.Key.FuncType, runtime_i: *u32, ) !void { const mod = sema.mod; @@ -7365,7 +7368,7 @@ fn analyzeGenericCallArg( comptime_arg.ty.hasRuntimeBits(mod) and !(try sema.typeRequiresComptime(comptime_arg.ty)); if (is_runtime) { - const param_ty = new_fn_info.param_types[runtime_i.*]; + const param_ty = new_fn_info.param_types[runtime_i.*].toType(); const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); try sema.queueFullTypeResolution(param_ty); runtime_args[runtime_i.*] = casted_arg; @@ -7387,7 +7390,7 @@ fn instantiateGenericCall( func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, ensure_result_used: bool, uncasted_args: []const Air.Inst.Ref, call_tag: Air.Inst.Tag, @@ -7431,14 +7434,14 @@ fn instantiateGenericCall( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7609,7 +7612,7 @@ fn instantiateGenericCall( // Make a runtime call to the new function, making sure to omit the comptime args. 
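     // Only runtime-typed operands appear in the AIR call; comptime arguments
     // were already baked into the instantiated callee as `comptime_args`.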
const comptime_args = callee.comptime_args.?; const func_ty = mod.declPtr(callee.owner_decl).ty; - const new_fn_info = func_ty.fnInfo(); + const new_fn_info = mod.typeToFunc(func_ty).?; const runtime_args_len = @intCast(u32, new_fn_info.param_types.len); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { @@ -7647,12 +7650,12 @@ fn instantiateGenericCall( total_i += 1; } - try sema.queueFullTypeResolution(new_fn_info.return_type); + try sema.queueFullTypeResolution(new_fn_info.return_type.toType()); } if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func != null and new_fn_info.return_type.isError(mod)) { + if (sema.owner_func != null and new_fn_info.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7677,7 +7680,7 @@ fn instantiateGenericCall( if (call_tag == .call_always_tail) { return sema.handleTailCall(block, call_src, func_ty, result); } - if (new_fn_info.return_type.isNoReturn()) { + if (new_fn_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7695,7 +7698,7 @@ fn resolveGenericInstantiationType( module_fn: *Module.Fn, new_module_func: *Module.Fn, namespace: Namespace.Index, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, ) !*Module.Fn { @@ -7755,14 +7758,14 @@ fn resolveGenericInstantiationType( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7822,13 +7825,13 @@ fn resolveGenericInstantiationType( var is_comptime = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_comptime = true; @@ -7868,8 +7871,8 @@ fn resolveGenericInstantiationType( new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. - const new_fn_info = new_decl.ty.fnInfo(); - if (try sema.typeRequiresComptime(new_fn_info.return_type)) { + const new_fn_info = mod.typeToFunc(new_decl.ty).?; + if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) { return error.ComptimeReturn; } // Similarly, if the call evaluated to a generic type we need to instead @@ -8969,19 +8972,19 @@ fn funcCommon( // the instantiation, which can depend on comptime parameters. // Related proposal: https://github.com/ziglang/zig/issues/11834 const cc_resolved = cc orelse .Unspecified; - const param_types = try sema.arena.alloc(Type, block.params.items.len); - const comptime_params = try sema.arena.alloc(bool, block.params.items.len); - for (block.params.items, 0..) 
|param, i| { + const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len); + var comptime_bits: u32 = 0; + for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| { const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @truncate(u1, noalias_bits >> index) != 0; }; - param_types[i] = param.ty; + dest_param_ty.* = param.ty.toIntern(); sema.analyzeParameter( block, .unneeded, param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -8994,7 +8997,7 @@ fn funcCommon( block, Module.paramSrc(src_node_offset, mod, decl, i), param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -9019,7 +9022,7 @@ fn funcCommon( else => |e| return e, }; - const return_type = if (!inferred_error_set or ret_poison) + const return_type: Type = if (!inferred_error_set or ret_poison) bare_return_type else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); @@ -9047,7 +9050,9 @@ fn funcCommon( }; return sema.failWithOwnedErrorMsg(msg); } - if (!ret_poison and !Type.fnCallingConventionAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) { + if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and + !try sema.validateExternType(return_type, .ret_ty)) + { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ return_type.fmt(sema.mod), @tagName(cc_resolved), @@ -9141,8 +9146,7 @@ fn funcCommon( return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{}); } if (is_generic and sema.no_partial_func_ty) return error.GenericPoison; - for (comptime_params) |ct| is_generic = is_generic or ct; - is_generic = is_generic or ret_ty_requires_comptime; + is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime; if (!is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can @@ -9151,10 +9155,11 @@ fn funcCommon( _ = try sema.resolveTypeFields(unresolved_stack_trace_ty); } - break :fn_ty try Type.Tag.function.create(sema.arena, .{ + break :fn_ty try mod.funcType(.{ .param_types = param_types, - .comptime_params = comptime_params.ptr, - .return_type = return_type, + .noalias_bits = noalias_bits, + .comptime_bits = comptime_bits, + .return_type = return_type.toIntern(), .cc = cc_resolved, .cc_is_generic = cc == null, .alignment = alignment orelse 0, @@ -9164,7 +9169,6 @@ fn funcCommon( .is_var_args = var_args, .is_generic = is_generic, .is_noinline = is_noinline, - .noalias_bits = noalias_bits, }); }; @@ -9203,7 +9207,7 @@ fn funcCommon( return sema.addType(fn_ty); } - const is_inline = fn_ty.fnCallingConvention() == .Inline; + const is_inline = fn_ty.fnCallingConvention(mod) == .Inline; const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none; const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: { @@ -9243,7 +9247,7 @@ fn analyzeParameter( block: *Block, param_src: LazySrcLoc, param: Block.Param, - comptime_params: []bool, + comptime_bits: *u32, i: usize, is_generic: *bool, cc: std.builtin.CallingConvention, @@ -9252,14 +9256,16 @@ fn analyzeParameter( ) !void { const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); - comptime_params[i] = param.is_comptime or requires_comptime; + if (param.is_comptime or requires_comptime) { + 
comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error + } const this_generic = param.ty.isGenericPoison(); is_generic.* = is_generic.* or this_generic; const target = mod.getTarget(); - if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + if (param.is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } if (!param.ty.isValidParamType(mod)) { @@ -9275,7 +9281,7 @@ fn analyzeParameter( }; return sema.failWithOwnedErrorMsg(msg); } - if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { + if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ param.ty.fmt(mod), @tagName(cc), @@ -15986,22 +15992,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ), .Fn => { // TODO: look into memoizing this result. - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len); - for (param_vals, 0..) |*param_val, i| { - const param_ty = info.param_types[i]; - const is_generic = param_ty.isGenericPoison(); - const param_ty_val = if (is_generic) - Value.null - else - try Value.Tag.opt_payload.create( - params_anon_decl.arena(), - try Value.Tag.ty.create(params_anon_decl.arena(), try param_ty.copy(params_anon_decl.arena())), - ); + for (param_vals, info.param_types, 0..) 
|*param_val, param_ty, i| { + const is_generic = param_ty == .generic_poison_type; + const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + .val = if (is_generic) .none else param_ty, + } }); const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; @@ -16015,7 +16017,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_noalias: bool, Value.makeBool(is_noalias), // type: ?type, - param_ty_val, + param_ty_val.toValue(), }; param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields); } @@ -16059,13 +16061,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const ret_ty_opt = if (!info.return_type.isGenericPoison()) - try Value.Tag.opt_payload.create( - sema.arena, - try Value.Tag.ty.create(sema.arena, info.return_type), - ) - else - Value.null; + const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + .val = if (info.return_type == .generic_poison_type) .none else info.return_type, + } }); const callconv_ty = try sema.getBuiltinType("CallingConvention"); @@ -16080,7 +16079,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_var_args: bool, Value.makeBool(info.is_var_args), // return_type: ?type, - ret_ty_opt, + ret_ty_opt.toValue(), // args: []const Fn.Param, args_val, }; @@ -17788,7 +17787,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; + const fn_align = mod.typeToFunc(elem_ty).?.alignment; if (inst_data.flags.has_align and abi_align != 0 and fn_align != 0 and abi_align != fn_align) { @@ -18939,7 +18938,7 @@ fn zirReify( if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; + const fn_align = mod.typeToFunc(elem_ty).?.alignment; if (abi_align != 0 and fn_align != 0 and abi_align != fn_align) { @@ -19483,12 +19482,10 @@ fn zirReify( const args_slice_val = args_val.castTag(.slice).?.data; const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); - const param_types = try sema.arena.alloc(Type, args_len); - const comptime_params = try sema.arena.alloc(bool, args_len); + const param_types = try sema.arena.alloc(InternPool.Index, args_len); var noalias_bits: u32 = 0; - var i: usize = 0; - while (i < args_len) : (i += 1) { + for (param_types, 0..) 
|*param_type, i| { const arg = try args_slice_val.ptr.elemValue(mod, i); const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here @@ -19505,25 +19502,22 @@ fn zirReify( const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - const param_type = try param_type_val.toType().copy(sema.arena); + param_type.* = param_type_val.ip_index; if (arg_is_noalias) { - if (!param_type.isPtrAtRuntime(mod)) { + if (!param_type.toType().isPtrAtRuntime(mod)) { return sema.fail(block, src, "non-pointer parameter declared noalias", .{}); } noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); } - - param_types[i] = param_type; - comptime_params[i] = false; } - var fn_info = Type.Payload.Function.Data{ + const ty = try mod.funcType(.{ .param_types = param_types, - .comptime_params = comptime_params.ptr, + .comptime_bits = 0, .noalias_bits = noalias_bits, - .return_type = try return_type.toType().copy(sema.arena), + .return_type = return_type.toIntern(), .alignment = alignment, .cc = cc, .is_var_args = is_var_args, @@ -19533,9 +19527,7 @@ fn zirReify( .cc_is_generic = false, .section_is_generic = false, .addrspace_is_generic = false, - }; - - const ty = try Type.Tag.function.create(sema.arena, fn_info); + }); return sema.addType(ty); }, .Frame => return sema.failWithUseOfAsync(block, src), @@ -23435,7 +23427,7 @@ fn explainWhyTypeIsComptimeInner( .Pointer => { const elem_ty = ty.elemType2(mod); if (elem_ty.zigTypeTag(mod) == .Fn) { - const fn_info = elem_ty.fnInfo(); + const fn_info = mod.typeToFunc(elem_ty).?; if (fn_info.is_generic) { try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{}); } @@ -23443,7 +23435,7 @@ fn explainWhyTypeIsComptimeInner( .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}), else => {}, } - if (fn_info.return_type.comptimeOnly(mod)) { + if (fn_info.return_type.toType().comptimeOnly(mod)) { try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{}); } return; @@ -23543,10 +23535,10 @@ fn validateExternType( const target = sema.mod.getTarget(); // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. // The goal is to experiment with more integrated CPU/GPU code. 
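         // I.e. on nvptx targets, `callconv(.Kernel)` functions are accepted
         // as extern even though their calling convention would normally
         // disqualify them via the check below.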
- if (ty.fnCallingConvention() == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { + if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { return true; } - return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention()); + return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod)); }, .Enum => { return sema.validateExternType(try ty.intTagType(mod), position); @@ -23619,7 +23611,7 @@ fn explainWhyTypeIsNotExtern( try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{}); return; } - switch (ty.fnCallingConvention()) { + switch (ty.fnCallingConvention(mod)) { .Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}), .Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}), .Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}), @@ -24548,10 +24540,10 @@ fn fieldCallBind( try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); - if (decl_type.zigTypeTag(mod) == .Fn and - decl_type.fnParamLen() >= 1) - { - const first_param_type = decl_type.fnParamType(0); + if (mod.typeToFunc(decl_type)) |func_type| f: { + if (func_type.param_types.len == 0) break :f; + + const first_param_type = func_type.param_types[0].toType(); // zig fmt: off if (first_param_type.isGenericPoison() or ( first_param_type.zigTypeTag(mod) == .Pointer and @@ -27090,8 +27082,9 @@ fn coerceInMemoryAllowedFns( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { - const dest_info = dest_ty.fnInfo(); - const src_info = src_ty.fnInfo(); + const mod = sema.mod; + const dest_info = mod.typeToFunc(dest_ty).?; + const src_info = mod.typeToFunc(src_ty).?; if (dest_info.is_var_args != src_info.is_var_args) { return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args }; @@ -27108,13 +27101,13 @@ fn coerceInMemoryAllowedFns( } }; } - if (!src_info.return_type.isNoReturn()) { - const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type, src_info.return_type, false, target, dest_src, src_src); + if (src_info.return_type != .noreturn_type) { + const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type.toType(), src_info.return_type.toType(), false, target, dest_src, src_src); if (rt != .ok) { return InMemoryCoercionResult{ .fn_return_type = .{ .child = try rt.dupe(sema.arena), - .actual = src_info.return_type, - .wanted = dest_info.return_type, + .actual = src_info.return_type.toType(), + .wanted = dest_info.return_type.toType(), } }; } } @@ -27134,22 +27127,23 @@ fn coerceInMemoryAllowedFns( } for (dest_info.param_types, 0..) |dest_param_ty, i| { - const src_param_ty = src_info.param_types[i]; + const src_param_ty = src_info.param_types[i].toType(); - if (dest_info.comptime_params[i] != src_info.comptime_params[i]) { + const i_small = @intCast(u5, i); + if (dest_info.paramIsComptime(i_small) != src_info.paramIsComptime(i_small)) { return InMemoryCoercionResult{ .fn_param_comptime = .{ .index = i, - .wanted = dest_info.comptime_params[i], + .wanted = dest_info.paramIsComptime(i_small), } }; } // Note: Cast direction is reversed here. 
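         // Parameters are contravariant: the destination's parameter type
         // must coerce in memory to the source's parameter type, not the
         // other way around.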
- const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src); + const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty.toType(), false, target, dest_src, src_src); if (param != .ok) { return InMemoryCoercionResult{ .fn_param = .{ .child = try param.dupe(sema.arena), .actual = src_param_ty, - .wanted = dest_param_ty, + .wanted = dest_param_ty.toType(), .index = i, } }; } @@ -31205,17 +31199,17 @@ fn resolvePeerTypes( return chosen_ty; } -pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void { +pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileError!void { const mod = sema.mod; - try sema.resolveTypeFully(fn_info.return_type); + try sema.resolveTypeFully(fn_info.return_type.toType()); - if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError(mod)) { + if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.toType().isError(mod)) { // Ensure the type exists so that backends can assume that. _ = try sema.getBuiltinType("StackTrace"); } for (fn_info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + try sema.resolveTypeFully(param_ty.toType()); } } @@ -31286,16 +31280,16 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeLayout(payload_ty); }, .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. return; } for (info.param_types) |param_ty| { - try sema.resolveTypeLayout(param_ty); + try sema.resolveTypeLayout(param_ty.toType()); } - try sema.resolveTypeLayout(info.return_type); + try sema.resolveTypeLayout(info.return_type.toType()); }, else => {}, } @@ -31615,15 +31609,13 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_merged, => false, - .function => true, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); } @@ -31644,7 +31636,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); } @@ -31653,6 +31645,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), + .func_type => true, + .simple_type => |t| switch (t) { .f16, .f32, @@ -31799,16 +31793,16 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { }, .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. 
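             // (A generic function type still contains `generic_poison`
             // placeholders, e.g. the parameter type of `fn (anytype) void`.)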
return; } for (info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + try sema.resolveTypeFully(param_ty.toType()); } - try sema.resolveTypeFully(info.return_type); + try sema.resolveTypeFully(info.return_type.toType()); }, else => {}, } @@ -31881,7 +31875,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .none => return ty, .u1_type, - .u5_type, .u8_type, .i8_type, .u16_type, @@ -31941,8 +31934,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, - .one_u5 => unreachable, - .four_u5 => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, .negative_one => unreachable, .calling_convention_c => unreachable, .calling_convention_inline => unreachable, @@ -32083,14 +32076,14 @@ fn resolveInferredErrorSet( // `*Module.Fn`. Not only is the function not relevant to the inferred error set // in this case, it may be a generic function which would cause an assertion failure // if we called `ensureFuncBodyAnalyzed` on it here. - const ies_func_owner_decl = sema.mod.declPtr(ies.func.owner_decl); - const ies_func_info = ies_func_owner_decl.ty.fnInfo(); + const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl); + const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?; // if ies declared by a inline function with generic return type, the return_type should be generic_poison, // because inline function does not create a new declaration, and the ies has been filled with analyzeCall, // so here we can simply skip this case. - if (ies_func_info.return_type.isGenericPoison()) { + if (ies_func_info.return_type == .generic_poison_type) { assert(ies_func_info.cc == .Inline); - } else if (ies_func_info.return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) { + } else if (ies_func_info.return_type.toType().errorUnionSet().castTag(.error_set_inferred).?.data == ies) { if (ies_func_info.is_generic) { const msg = msg: { const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); @@ -32285,7 +32278,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const prev_field_index = struct_obj.fields.getIndex(field_name).?; const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index }); - try sema.mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); + try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); break :msg msg; }; @@ -32387,7 +32380,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field); @@ -32402,7 +32395,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer 
msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty); @@ -32580,7 +32573,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { - return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)}); + return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)}); } if (fields_len > 0) { @@ -32590,7 +32583,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{ - int_tag_ty.fmt(sema.mod), + int_tag_ty.fmt(mod), fields_len - 1, }); break :msg msg; @@ -32605,7 +32598,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { union_obj.tag_ty = provided_ty; const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { .enum_type => |x| x, - else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}), + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), }; // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. @@ -32705,7 +32698,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy; const msg = msg: { - const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, sema.mod)}); + const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, mod)}); errdefer msg.destroy(gpa); try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -32751,7 +32744,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const prev_field_index = union_obj.fields.getIndex(field_name).?; const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy; - try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); + try mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; }; @@ -32766,7 +32759,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .range = .type, }).lazy; const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ - field_name, union_obj.tag_ty.fmt(sema.mod), + field_name, union_obj.tag_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -32800,7 +32793,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot 
contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field); @@ -32815,7 +32808,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty); @@ -33060,7 +33053,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set, .error_set_merged, .error_union, - .function, .error_set_inferred, .anyframe_T, .pointer, @@ -33087,7 +33079,12 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .ptr_type => null, + + .ptr_type, + .error_union_type, + .func_type, + => null, + .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); @@ -33102,13 +33099,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; }, .opt_type => |child| { - if (child.toType().isNoReturn()) { - return Value.null; + if (child == .noreturn_type) { + return try mod.nullValue(ty); } else { return null; } }, - .error_union_type => null, + .simple_type => |t| switch (t) { .f16, .f32, @@ -33674,15 +33671,13 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_merged, => false, - .function => true, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.typeRequiresComptime(child_ty); } @@ -33703,7 +33698,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.typeRequiresComptime(child_ty); } @@ -33714,6 +33709,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union_type => |error_union_type| { return sema.typeRequiresComptime(error_union_type.payload_type.toType()); }, + .func_type => true, + .simple_type => |t| return switch (t) { .f16, .f32, @@ -33870,7 +33867,8 @@ fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { /// Synchronize logic with `Type.isFnOrHasRuntimeBits`. 
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - const fn_info = ty.fnInfo(); + const mod = sema.mod; + const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; switch (fn_info.cc) { @@ -33878,7 +33876,7 @@ pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { .Inline => return false, else => {}, } - if (try sema.typeRequiresComptime(fn_info.return_type)) { + if (try sema.typeRequiresComptime(fn_info.return_type.toType())) { return false; } return true; diff --git a/src/Zir.zig b/src/Zir.zig index 136920d75d6a..ec3288620c42 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2052,7 +2052,6 @@ pub const Inst = struct { /// and `[]Ref`. pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), - u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -2121,8 +2120,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), - one_u5 = @enumToInt(InternPool.Index.one_u5), - four_u5 = @enumToInt(InternPool.Index.four_u5), + one_u8 = @enumToInt(InternPool.Index.one_u8), + four_u8 = @enumToInt(InternPool.Index.four_u8), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 3e893411fccb..dea5b6312974 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -472,7 +472,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { fn gen(self: *Self) !void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // stp fp, lr, [sp, #-16]! 
_ = try self.addInst(.{ @@ -1146,7 +1146,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the @@ -4271,7 +4271,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (info.return_value == .stack_offset) { log.debug("airCall: return by reference", .{}); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); @@ -4428,10 +4428,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4460,10 +4460,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4483,7 +4484,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const mod = self.bin_file.options.module.?; const abi_size = @intCast(u32, ret_ty.abiSize(mod)); const abi_align = ret_ty.abiAlignment(mod); @@ -6226,12 +6226,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -6239,8 +6238,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -6271,8 +6269,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) 
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size == 0) { result.args[i] = .{ .none = {} }; continue; @@ -6280,14 +6278,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned // values to spread across odd-numbered registers. - if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) { + if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) { // Round up NCRN to the next even number ncrn += ncrn % 2; } if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) { if (param_size <= 8) { - result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty) }; + result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) }; ncrn += 1; } else { return self.fail("TODO MCValues with multiple registers", .{}); @@ -6298,7 +6296,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { ncrn = 8; // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided // that the entire stack space consumed by the arguments is 8-byte aligned. - if (ty.abiAlignment(mod) == 8) { + if (ty.toType().abiAlignment(mod) == 8) { if (nsaa % 8 != 0) { nsaa += 8 - (nsaa % 8); } @@ -6336,10 +6334,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (param_types, 0..) |ty, i| { - if (ty.abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.abiSize(mod)); - const param_alignment = ty.abiAlignment(mod); + for (fn_info.param_types, 0..) |ty, i| { + if (ty.toType().abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5cc165fdfe73..e84c4de981c9 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -478,7 +478,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { fn gen(self: *Self) !void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // push {fp, lr} const push_reloc = try self.addNop(); @@ -1123,7 +1123,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the @@ -4250,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // untouched by the parameter passing code const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); @@ -4350,7 +4350,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (RegisterManager.indexOfRegIntoTracked(reg) == 
null) { // Save function return value into a tracked register log.debug("airCall: copying {} as it is not tracked", .{reg}); - const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value); + const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(mod), info.return_value); break :result MCValue{ .register = new_reg }; } }, @@ -4374,10 +4374,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4406,10 +4406,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4429,7 +4430,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const mod = self.bin_file.options.module.?; const abi_size = @intCast(u32, ret_ty.abiSize(mod)); const abi_align = ret_ty.abiAlignment(mod); @@ -6171,12 +6171,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -6184,8 +6183,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -6219,11 +6217,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (param_types, 0..) |ty, i| { - if (ty.abiAlignment(mod) == 8) + for (fn_info.param_types, 0..) 
|ty, i| { + if (ty.toType().abiAlignment(mod) == 8) ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); - const param_size = @intCast(u32, ty.abiSize(mod)); + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6235,7 +6233,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (ty.abiAlignment(mod) == 8) + if (ty.toType().abiAlignment(mod) == 8) nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; @@ -6269,10 +6267,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (param_types, 0..) |ty, i| { - if (ty.abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.abiSize(mod)); - const param_alignment = ty.abiAlignment(mod); + for (fn_info.param_types, 0..) |ty, i| { + if (ty.toType().abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5cf621488e9f..faa2b2b7d034 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -347,7 +347,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for riscv64. @@ -1803,7 +1804,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for an instruction, patch this later const index = try self.addInst(.{ @@ -2621,12 +2623,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. 
.return_value = undefined, .stack_byte_count = undefined, @@ -2634,8 +2635,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -2655,8 +2655,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var next_stack_offset: u32 = 0; const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) |ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 0677b72f1a96..9d58dd9f29b3 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -363,7 +363,8 @@ pub fn generate( } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for sparc64. @@ -4458,12 +4459,11 @@ fn realStackOffset(off: u32) u32 { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -4471,8 +4471,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -4495,8 +4494,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) .callee => abi.c_abi_int_param_regs_callee_view, }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) 
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4580,7 +4579,8 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for a branch instruction, patch this later diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 6ae516371409..a95026484028 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1145,7 +1145,7 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { fn genFunctype( gpa: Allocator, cc: std.builtin.CallingConvention, - params: []const Type, + params: []const InternPool.Index, return_type: Type, mod: *Module, ) !wasm.Type { @@ -1170,7 +1170,8 @@ fn genFunctype( } // param types - for (params) |param_type| { + for (params) |param_type_ip| { + const param_type = param_type_ip.toType(); if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; switch (cc) { @@ -1234,9 +1235,9 @@ pub fn generate( } fn genFunc(func: *CodeGen) InnerError!void { - const fn_info = func.decl.ty.fnInfo(); const mod = func.bin_file.base.options.module.?; - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); + const fn_info = mod.typeToFunc(func.decl.ty).?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1345,10 +1346,8 @@ const CallWValues = struct { fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { const mod = func.bin_file.base.options.module.?; - const cc = fn_ty.fnCallingConvention(); - const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen()); - defer func.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallWValues = .{ .args = &.{}, .return_value = .none, @@ -1360,8 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value - const fn_info = fn_ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. 
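// "sret" means the result is returned through caller-provided memory:
// when `firstParamSRet` reports true for this cc and return type, the
// callee receives the result address as a synthetic first argument, so
// the next wasm local is claimed for that pointer before any declared
// parameter gets its own local. A minimal sketch of the decision, using
// only calls that appear in this function:
//
//     const fn_info = mod.typeToFunc(fn_ty).?;
//     if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) {
//         // reserve the first local for the out-pointer
//     }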
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1370,8 +1368,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { - for (param_types) |ty| { - if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { + for (fn_info.param_types) |ty| { + if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1380,8 +1378,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV } }, .C => { - for (param_types) |ty| { - const ty_classes = abi.classifyType(ty, mod); + for (fn_info.param_types) |ty| { + const ty_classes = abi.classifyType(ty.toType(), mod); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -2095,11 +2093,11 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { } fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const fn_info = func.decl.ty.fnInfo(); - const ret_ty = fn_info.return_type; - const mod = func.bin_file.base.options.module.?; + const fn_info = mod.typeToFunc(func.decl.ty).?; + const ret_ty = fn_info.return_type.toType(); // result must be stored in the stack and we return a pointer // to the stack instead @@ -2146,8 +2144,8 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try func.allocStack(Type.usize); // create pointer to void } - const fn_info = func.decl.ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + const fn_info = mod.typeToFunc(func.decl.ty).?; + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { break :result func.return_value; } @@ -2163,12 +2161,12 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(un_op); const ret_ty = func.typeOf(un_op).childType(mod); - const fn_info = func.decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(func.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2191,9 +2189,9 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif .Pointer => ty.childType(mod), else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); - const fn_info = fn_ty.fnInfo(); - const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); + const ret_ty = fn_ty.fnReturnType(mod); + const fn_info = mod.typeToFunc(fn_ty).?; + const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod); const callee: ?Decl.Index = blk: { const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; @@ -2203,8 +2201,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif break :blk function.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |extern_fn| { const ext_decl = mod.declPtr(extern_fn.data.owner_decl); - const ext_info = ext_decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod); + const ext_info = mod.typeToFunc(ext_decl.ty).?; + var func_type = try 
genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); const atom = func.bin_file.getAtomPtr(atom_index); @@ -2235,7 +2233,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const arg_ty = func.typeOf(arg); if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); + try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val); } if (callee) |direct| { @@ -2248,7 +2246,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2264,7 +2262,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (first_param_sret) { break :result_value sret; // TODO: Make this less fragile and optimize - } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { + } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); const scalar_type = abi.scalarType(ret_ty, mod); @@ -2528,7 +2526,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const arg_index = func.arg_index; const arg = func.args[arg_index]; - const cc = func.decl.ty.fnInfo().cc; + const cc = mod.typeToFunc(func.decl.ty).?.cc; const arg_ty = func.typeOfIndex(inst); if (cc == .C) { const arg_classes = abi.classifyType(arg_ty, mod); @@ -2647,9 +2645,9 @@ fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) Inner } switch (op) { - .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }), - .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), - .shl => return func.callIntrinsic("__ashlti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), + .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .shr => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), + .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), .xor => { const result = try func.allocStack(ty); try func.emitWValue(result); @@ -2839,7 +2837,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In }; // fma requires three operands - var param_types_buffer: [3]Type = .{ ty, ty, ty }; + var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index }; const param_types = param_types_buffer[0..args.len]; return func.callIntrinsic(fn_name, param_types, ty, args); } @@ -5298,7 +5296,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! 
// call __extendhfsf2(f16) f32 const f32_result = try func.callIntrinsic( "__extendhfsf2", - &.{Type.f16}, + &.{.f16_type}, Type.f32, &.{operand}, ); @@ -5316,7 +5314,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! target_util.compilerRtFloatAbbrev(wanted_bits), }) catch unreachable; - return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand}); + return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand}); } fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { @@ -5347,7 +5345,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } else operand; // call __truncsfhf2(f32) f16 - return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op}); + return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op}); } var fn_name_buf: [12]u8 = undefined; @@ -5356,7 +5354,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro target_util.compilerRtFloatAbbrev(wanted_bits), }) catch unreachable; - return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand}); + return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand}); } fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { @@ -5842,7 +5840,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs, lhs_shifted, rhs, rhs_shifted }, ); @@ -5866,19 +5864,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mul1 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs_lsb, zero, rhs_msb, zero }, ); const mul2 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ rhs_lsb, zero, lhs_msb, zero }, ); const mul3 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs_msb, zero, rhs_msb, zero }, ); @@ -5977,7 +5975,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // call to compiler-rt `fn fmaf(f32, f32, f32) f32` var result = try func.callIntrinsic( "fmaf", - &.{ Type.f32, Type.f32, Type.f32 }, + &.{ .f32_type, .f32_type, .f32_type }, Type.f32, &.{ rhs_ext, lhs_ext, addend_ext }, ); @@ -6707,7 +6705,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn callIntrinsic( func: *CodeGen, name: []const u8, - param_types: []const Type, + param_types: []const InternPool.Index, return_type: Type, args: []const WValue, ) InnerError!WValue { @@ -6735,8 +6733,8 @@ fn callIntrinsic( // Lower all arguments to the stack before we call our function for (args, 0..) 
|arg, arg_i| { assert(!(want_sret_param and arg == .stack)); - assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod)); - try func.lowerArg(.C, param_types[arg_i], arg); + assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod)); + try func.lowerArg(.C, param_types[arg_i].toType(), arg); } // Actually call our intrinsic @@ -6938,7 +6936,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.end)); const slice_ty = Type.const_slice_u8_sentinel_0; - const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod); + const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 30c324836008..149f872c9aee 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -26,6 +26,7 @@ const Liveness = @import("../../Liveness.zig"); const Lower = @import("Lower.zig"); const Mir = @import("Mir.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Target = std.Target; const Type = @import("../../type.zig").Type; const TypedValue = @import("../../TypedValue.zig"); @@ -697,7 +698,8 @@ pub fn generate( FrameAlloc.init(.{ .size = 0, .alignment = 1 }), ); - var call_info = function.resolveCallingConventionValues(fn_type, &.{}, .args_frame) catch |err| switch (err) { + const fn_info = mod.typeToFunc(fn_type).?; + var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create( @@ -1566,7 +1568,7 @@ fn asmMemoryRegisterImmediate( fn gen(self: *Self) InnerError!void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { try self.asmRegister(.{ ._, .push }, .rbp); const backpatch_push_callee_preserved_regs = try self.asmPlaceholder(); @@ -8042,7 +8044,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier else => unreachable, }; - var info = try self.resolveCallingConventionValues(fn_ty, args[fn_ty.fnParamLen()..], .call_frame); + const fn_info = mod.typeToFunc(fn_ty).?; + + var info = try self.resolveCallingConventionValues(fn_info, args[fn_info.param_types.len..], .call_frame); defer info.deinit(self); // We need a properly aligned and sized call frame to be able to call this function. 
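// Var-arg types are not part of the function type, so they must come
// from the call site. With `resolveCallingConventionValues` now taking
// the interned `fn_info`, the fixed parameter count is
// `fn_info.param_types.len` (replacing `fn_ty.fnParamLen()`), and
// everything past it in `args` is the var-args tail. A sketch of the
// split performed in the next hunk, assuming `param_types` is the
// combined scratch buffer allocated there:
//
//     for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src|
//         dest.* = src.toType(); // fixed params: interned Index -> Type
//     for (param_types[fn_info.param_types.len..], var_args) |*dest, arg|
//         dest.* = self.typeOf(arg); // var args typed per call site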
@@ -8083,7 +8087,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ret_lock = switch (info.return_value.long) { .none, .unreach => null, .indirect => |reg_off| lock: { - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_info.return_type.toType(); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod)); try self.genSetReg(reg_off.reg, Type.usize, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, @@ -8199,9 +8203,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv.short) { .none => {}, .register => try self.genCopy(ret_ty, self.ret_mcv.short, operand), @@ -11683,18 +11688,23 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues( self: *Self, - fn_ty: Type, + fn_info: InternPool.Key.FuncType, var_args: []const Air.Inst.Ref, stack_frame_base: FrameIndex, ) !CallMCValues { const mod = self.bin_file.options.module.?; - const cc = fn_ty.fnCallingConvention(); - const param_len = fn_ty.fnParamLen(); - const param_types = try self.gpa.alloc(Type, param_len + var_args.len); + const cc = fn_info.cc; + const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + + for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| { + dest.* = src.toType(); + } // TODO: promote var arg types - for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.typeOf(arg); + for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg| { + param_ty.* = self.typeOf(arg); + } + var result: CallMCValues = .{ .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. @@ -11704,7 +11714,7 @@ fn resolveCallingConventionValues( }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_info.return_type.toType(); switch (cc) { .Naked => { diff --git a/src/codegen.zig b/src/codegen.zig index 90b6bfccf2fc..9eb294feacd2 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1081,7 +1081,7 @@ fn genDeclRef( // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? 
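// A generic function is never lowered to machine code, so there is no
// real address to reference; the immediate produced below only needs to
// be a non-null, suitably aligned placeholder, and the function type's
// ABI alignment serves that role (intent inferred from the surrounding
// code, not stated by the commit). Sketch of the updated check:
//
//     if (tv.ty.castPtrToFn(mod)) |fn_ty| {
//         if (mod.typeToFunc(fn_ty).?.is_generic)
//             return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) });
//     }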
if (tv.ty.castPtrToFn(mod)) |fn_ty| { - if (fn_ty.fnInfo().is_generic) { + if (mod.typeToFunc(fn_ty).?.is_generic) { return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) }); } } else if (tv.ty.zigTypeTag(mod) == .Pointer) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f45c17822386..601382c1fdf4 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1507,7 +1507,7 @@ pub const DeclGen = struct { const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); - const fn_info = fn_decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(fn_decl.ty).?; if (fn_info.cc == .Naked) { switch (kind) { .forward => try w.writeAll("zig_naked_decl "), @@ -1517,7 +1517,7 @@ pub const DeclGen = struct { } if (fn_decl.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); - if (fn_info.return_type.ip_index == .noreturn_type) try w.writeAll("zig_noreturn "); + if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( dg.decl_index, @@ -3455,7 +3455,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } else { try reap(f, inst, &.{un_op}); // Not even allowed to return void in a naked function. - if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() != .Naked else true) + if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention(mod) != .Naked else true) try writer.writeAll("return;\n"); } return .none; @@ -4094,7 +4094,7 @@ fn airCall( ) !CValue { const mod = f.object.dg.module; // Not even allowed to call panic in a naked function. - if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none; const gpa = f.object.dg.gpa; const writer = f.object.writer(); @@ -4143,7 +4143,7 @@ fn airCall( else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); const result_local = result: { @@ -4622,8 +4622,9 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnreach(f: *Function) !CValue { + const mod = f.object.dg.module; // Not even allowed to call unreachable in a naked function. 
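// The mechanical half of the transition shows up here: Type queries
// such as `fnCallingConvention` now take a `*Module` so they can reach
// the InternPool instead of decoding a tag/payload, which is why `mod`
// is hoisted to the top of these functions. The new call shape, as in
// the hunk below:
//
//     const mod = f.object.dg.module;
//     if (f.object.dg.decl) |decl| {
//         if (decl.ty.fnCallingConvention(mod) == .Naked) return .none;
//     }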
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none; try f.object.writer().writeAll("zig_unreachable();\n"); return .none; diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index b51d81a30b22..a2af395a984b 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1720,7 +1720,7 @@ pub const CType = extern union { .Opaque => self.init(.void), .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (!info.is_generic) { if (lookup.isMutable()) { const param_kind: Kind = switch (kind) { @@ -1728,10 +1728,10 @@ pub const CType = extern union { .complete, .parameter, .global => .parameter, .payload => unreachable, }; - _ = try lookup.typeToIndex(info.return_type, param_kind); + _ = try lookup.typeToIndex(info.return_type.toType(), param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - _ = try lookup.typeToIndex(param_type, param_kind); + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + _ = try lookup.typeToIndex(param_type.toType(), param_kind); } } self.init(if (info.is_var_args) .varargs_function else .function); @@ -2013,7 +2013,7 @@ pub const CType = extern union { .function, .varargs_function, => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (kind) { .forward, .forward_parameter => .forward_parameter, @@ -2023,21 +2023,21 @@ pub const CType = extern union { var c_params_len: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?; c_param_i += 1; } const fn_pl = try arena.create(Payload.Function); fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?, + .return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?, .param_types = params_pl, } }; return initPayload(fn_pl); @@ -2145,7 +2145,7 @@ pub const CType = extern union { => { if (ty.zigTypeTag(mod) != .Fn) return false; - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const data = cty.cast(Payload.Function).?.data; const param_kind: Kind = switch (self.kind) { @@ -2154,18 +2154,18 @@ pub const CType = extern union { .payload => unreachable, }; - if (!self.eqlRecurse(info.return_type, data.return_type, param_kind)) + if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind)) return false; var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; if (c_param_i >= data.param_types.len) return false; const param_cty = data.param_types[c_param_i]; c_param_i += 1; - if (!self.eqlRecurse(param_type, param_cty, param_kind)) + if (!self.eqlRecurse(param_type.toType(), 
param_cty, param_kind)) return false; } return c_param_i == data.param_types.len; @@ -2258,7 +2258,7 @@ pub const CType = extern union { .function, .varargs_function, => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (self.kind) { .forward, .forward_parameter => .forward_parameter, @@ -2266,10 +2266,10 @@ pub const CType = extern union { .payload => unreachable, }; - self.updateHasherRecurse(hasher, info.return_type, param_kind); + self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - self.updateHasherRecurse(hasher, param_type, param_kind); + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + self.updateHasherRecurse(hasher, param_type.toType(), param_kind); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 3289d389b410..476f73cbe432 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -954,17 +954,17 @@ pub const Object = struct { builder.positionBuilderAtEnd(entry_block); // This gets the LLVM values from the function and stores them in `dg.args`. - const fn_info = decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(decl.ty).?; const sret = firstParamSRet(fn_info, mod); const ret_ptr = if (sret) llvm_func.getParam(0) else null; const gpa = dg.gpa; - if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) { + if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) { .signed => dg.addAttr(llvm_func, 0, "signext"), .unsigned => dg.addAttr(llvm_func, 0, "zeroext"), }; - const err_return_tracing = fn_info.return_type.isError(mod) and + const err_return_tracing = fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing; const err_ret_trace = if (err_return_tracing) @@ -986,7 +986,7 @@ pub const Object = struct { .byval => { assert(!it.byval_attr); const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); const param = llvm_func.getParam(llvm_arg_i); try args.ensureUnusedCapacity(1); @@ -1005,7 +1005,7 @@ pub const Object = struct { llvm_arg_i += 1; }, .byref => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1024,7 +1024,7 @@ pub const Object = struct { } }, .byref_mut => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1044,7 +1044,7 @@ pub const Object = struct { }, .abi_sized_int => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1071,7 +1071,7 @@ pub const Object = struct { }, .slice => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { @@ -1104,7 +1104,7 @@ pub const Object = struct { 
.multiple_llvm_types => { assert(!it.byval_attr); const field_types = it.llvm_types_buffer[0..it.llvm_types_len]; - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); @@ -1135,7 +1135,7 @@ pub const Object = struct { args.appendAssumeCapacity(casted); }, .float_array => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1153,7 +1153,7 @@ pub const Object = struct { } }, .i32_array, .i64_array => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1182,7 +1182,7 @@ pub const Object = struct { const line_number = decl.src_line + 1; const is_internal_linkage = decl.val.tag() != .extern_fn and !mod.decl_exports.contains(decl_index); - const noret_bit: c_uint = if (fn_info.return_type.isNoReturn()) + const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type) llvm.DIFlags.NoReturn else 0; @@ -2331,26 +2331,26 @@ pub const Object = struct { return full_di_ty; }, .Fn => { - const fn_info = ty.fnInfo(); + const fn_info = mod.typeToFunc(ty).?; var param_di_types = std.ArrayList(*llvm.DIType).init(gpa); defer param_di_types.deinit(); // Return type goes first. - if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) { const sret = firstParamSRet(fn_info, mod); - const di_ret_ty = if (sret) Type.void else fn_info.return_type; + const di_ret_ty = if (sret) Type.void else fn_info.return_type.toType(); try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); if (sret) { - const ptr_ty = try mod.singleMutPtrType(fn_info.return_type); + const ptr_ty = try mod.singleMutPtrType(fn_info.return_type.toType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } } else { try param_di_types.append(try o.lowerDebugType(Type.void, .full)); } - if (fn_info.return_type.isError(mod) and + if (fn_info.return_type.toType().isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType()); @@ -2358,13 +2358,13 @@ pub const Object = struct { } for (fn_info.param_types) |param_ty| { - if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - if (isByRef(param_ty, mod)) { - const ptr_ty = try mod.singleMutPtrType(param_ty); + if (isByRef(param_ty.toType(), mod)) { + const ptr_ty = try mod.singleMutPtrType(param_ty.toType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } else { - try param_di_types.append(try o.lowerDebugType(param_ty, .full)); + try param_di_types.append(try o.lowerDebugType(param_ty.toType(), .full)); } } @@ -2565,7 +2565,7 @@ pub const DeclGen = struct { if (gop.found_existing) return gop.value_ptr.*; assert(decl.has_tv); - const fn_info = zig_fn_type.fnInfo(); + const fn_info = mod.typeToFunc(zig_fn_type).?; const target = mod.getTarget(); const sret = firstParamSRet(fn_info, 
mod); @@ -2598,11 +2598,11 @@ pub const DeclGen = struct { dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 dg.addArgAttr(llvm_fn, 0, "noalias"); - const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type); + const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type.toType()); llvm_fn.addSretAttr(raw_llvm_ret_ty); } - const err_return_tracing = fn_info.return_type.isError(mod) and + const err_return_tracing = fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { @@ -2626,13 +2626,13 @@ pub const DeclGen = struct { } if (fn_info.alignment != 0) { - llvm_fn.setAlignment(fn_info.alignment); + llvm_fn.setAlignment(@intCast(c_uint, fn_info.alignment)); } // Function attributes that are independent of analysis results of the function body. dg.addCommonFnAttributes(llvm_fn); - if (fn_info.return_type.isNoReturn()) { + if (fn_info.return_type == .noreturn_type) { dg.addFnAttr(llvm_fn, "noreturn"); } @@ -2645,15 +2645,15 @@ pub const DeclGen = struct { while (it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); if (!isByRef(param_ty, mod)) { dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = fn_info.param_types[it.zig_index - 1]; - const param_llvm_ty = try dg.lowerType(param_ty); - const alignment = param_ty.abiAlignment(mod); + const param_llvm_ty = try dg.lowerType(param_ty.toType()); + const alignment = param_ty.toType().abiAlignment(mod); dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { @@ -3142,7 +3142,7 @@ pub const DeclGen = struct { fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type { const mod = dg.module; - const fn_info = fn_ty.fnInfo(); + const fn_info = mod.typeToFunc(fn_ty).?; const llvm_ret_ty = try lowerFnRetTy(dg, fn_info); var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa); @@ -3152,7 +3152,7 @@ pub const DeclGen = struct { try llvm_params.append(dg.context.pointerType(0)); } - if (fn_info.return_type.isError(mod) and + if (fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing) { const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType()); @@ -3163,19 +3163,19 @@ pub const DeclGen = struct { while (it.next()) |lowering| switch (lowering) { .no_bits => continue, .byval => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); try llvm_params.append(try dg.lowerType(param_ty)); }, .byref, .byref_mut => { try llvm_params.append(dg.context.pointerType(0)); }, .abi_sized_int => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); try llvm_params.append(dg.context.intType(abi_size * 8)); }, .slice => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod) @@ -3195,7 +3195,7 @@ pub const DeclGen = struct { try llvm_params.append(dg.context.intType(16)); }, .float_array => |count| { - const param_ty = 
fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); const field_count = @intCast(c_uint, count); const arr_ty = float_ty.arrayType(field_count); @@ -3223,7 +3223,7 @@ pub const DeclGen = struct { const mod = dg.module; const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, - .Fn => !elem_ty.fnInfo().is_generic, + .Fn => !mod.typeToFunc(elem_ty).?.is_generic, .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; @@ -4204,7 +4204,7 @@ pub const DeclGen = struct { const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or - (is_fn_body and decl.ty.fnInfo().is_generic)) + (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic)) { return self.lowerPtrToVoid(tv.ty); } @@ -4354,7 +4354,7 @@ pub const DeclGen = struct { llvm_fn: *llvm.Value, param_ty: Type, param_index: u32, - fn_info: Type.Payload.Function.Data, + fn_info: InternPool.Key.FuncType, llvm_arg_i: u32, ) void { const mod = dg.module; @@ -4774,8 +4774,8 @@ pub const FuncGen = struct { .Pointer => callee_ty.childType(mod), else => unreachable, }; - const fn_info = zig_fn_ty.fnInfo(); - const return_type = fn_info.return_type; + const fn_info = mod.typeToFunc(zig_fn_ty).?; + const return_type = fn_info.return_type.toType(); const llvm_fn = try self.resolveInst(pl_op.operand); const target = mod.getTarget(); const sret = firstParamSRet(fn_info, mod); @@ -4790,7 +4790,7 @@ pub const FuncGen = struct { break :blk ret_ptr; }; - const err_return_tracing = fn_info.return_type.isError(mod) and + const err_return_tracing = return_type.isError(mod) and self.dg.module.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { try llvm_args.append(self.err_ret_trace.?); @@ -4971,14 +4971,14 @@ pub const FuncGen = struct { while (it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); if (!isByRef(param_ty, mod)) { self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); const param_llvm_ty = try self.dg.lowerType(param_ty); const alignment = param_ty.abiAlignment(mod); self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); @@ -4998,7 +4998,7 @@ pub const FuncGen = struct { .slice => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const ptr_info = param_ty.ptrInfo(mod); const llvm_arg_i = it.llvm_index - 2; @@ -5023,7 +5023,7 @@ pub const FuncGen = struct { }; } - if (return_type.isNoReturn() and attr != .AlwaysTail) { + if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) { return null; } @@ -5088,9 +5088,9 @@ pub const FuncGen = struct { _ = self.builder.buildRetVoid(); return null; } - const fn_info = self.dg.decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(self.dg.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { - if (fn_info.return_type.isError(mod)) { + if (fn_info.return_type.toType().isError(mod)) { // Functions with an empty error set are emitted with an 
error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. @@ -5135,9 +5135,9 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(mod); - const fn_info = self.dg.decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(self.dg.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { - if (fn_info.return_type.isError(mod)) { + if (fn_info.return_type.toType().isError(mod)) { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. @@ -6148,25 +6148,21 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const is_internal_linkage = !mod.decl_exports.contains(decl_index); - var fn_ty_pl: Type.Payload.Function = .{ - .base = .{ .tag = .function }, - .data = .{ - .param_types = &.{}, - .comptime_params = undefined, - .return_type = Type.void, - .alignment = 0, - .noalias_bits = 0, - .cc = .Unspecified, - .is_var_args = false, - .is_generic = false, - .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, - .section_is_generic = false, - .addrspace_is_generic = false, - }, - }; - const fn_ty = Type.initPayload(&fn_ty_pl.base); + const fn_ty = try mod.funcType(.{ + .param_types = &.{}, + .return_type = .void_type, + .alignment = 0, + .noalias_bits = 0, + .comptime_bits = 0, + .cc = .Unspecified, + .is_var_args = false, + .is_generic = false, + .is_noinline = false, + .align_is_generic = false, + .cc_is_generic = false, + .section_is_generic = false, + .addrspace_is_generic = false, + }); const subprogram = dib.createFunction( di_file.toScope(), decl.name, @@ -10546,31 +10542,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { } } -fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *Module) bool { - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false; +fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool { + if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false; const target = mod.getTarget(); switch (fn_info.cc) { - .Unspecified, .Inline => return isByRef(fn_info.return_type, mod), + .Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod), .C => switch (target.cpu.arch) { .mips, .mipsel => return false, .x86_64 => switch (target.os.tag) { - .windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, - else => return firstParamSRetSystemV(fn_info.return_type, mod), + .windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, + else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), }, - .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect, - .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { + .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect, + .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) { .memory, .i64_array => return true, .i32_array => |size| return size != 1, .byval => return false, }, - .riscv32, .riscv64 => return 
riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory, + .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, else => return false, // TODO investigate C ABI for other architectures }, - .SysV => return firstParamSRetSystemV(fn_info.return_type, mod), - .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, - .Stdcall => return !isScalar(mod, fn_info.return_type), + .SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), + .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, + .Stdcall => return !isScalar(mod, fn_info.return_type.toType()), else => return false, } } @@ -10585,13 +10581,14 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool { /// In order to support the C calling convention, some return types need to be lowered /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. -fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { +fn lowerFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type { const mod = dg.module; - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { + const return_type = fn_info.return_type.toType(); + if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. - if (fn_info.return_type.isError(mod)) { + if (return_type.isError(mod)) { return dg.lowerType(Type.anyerror); } else { return dg.context.voidType(); @@ -10600,61 +10597,61 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { const target = mod.getTarget(); switch (fn_info.cc) { .Unspecified, .Inline => { - if (isByRef(fn_info.return_type, mod)) { + if (isByRef(return_type, mod)) { return dg.context.voidType(); } else { - return dg.lowerType(fn_info.return_type); + return dg.lowerType(return_type); } }, .C => { switch (target.cpu.arch) { - .mips, .mipsel => return dg.lowerType(fn_info.return_type), + .mips, .mipsel => return dg.lowerType(return_type), .x86_64 => switch (target.os.tag) { .windows => return lowerWin64FnRetTy(dg, fn_info), else => return lowerSystemVFnRetTy(dg, fn_info), }, .wasm32 => { - if (isScalar(mod, fn_info.return_type)) { - return dg.lowerType(fn_info.return_type); + if (isScalar(mod, return_type)) { + return dg.lowerType(return_type); } - const classes = wasm_c_abi.classifyType(fn_info.return_type, mod); + const classes = wasm_c_abi.classifyType(return_type, mod); if (classes[0] == .indirect or classes[0] == .none) { return dg.context.voidType(); } assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod); + const scalar_type = wasm_c_abi.scalarType(return_type, mod); const abi_size = scalar_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); }, .aarch64, .aarch64_be => { - switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) { + switch (aarch64_c_abi.classifyType(return_type, mod)) { .memory => return dg.context.voidType(), - .float_array => return dg.lowerType(fn_info.return_type), - .byval => return dg.lowerType(fn_info.return_type), + .float_array => return dg.lowerType(return_type), + .byval => return dg.lowerType(return_type), .integer => { - const bit_size = 
fn_info.return_type.bitSize(mod); + const bit_size = return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => return dg.context.intType(64).arrayType(2), } }, .arm, .armeb => { - switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { + switch (arm_c_abi.classifyType(return_type, mod, .ret)) { .memory, .i64_array => return dg.context.voidType(), .i32_array => |len| if (len == 1) { return dg.context.intType(32); } else { return dg.context.voidType(); }, - .byval => return dg.lowerType(fn_info.return_type), + .byval => return dg.lowerType(return_type), } }, .riscv32, .riscv64 => { - switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) { + switch (riscv_c_abi.classifyType(return_type, mod)) { .memory => return dg.context.voidType(), .integer => { - const bit_size = fn_info.return_type.bitSize(mod); + const bit_size = return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => { @@ -10664,50 +10661,52 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { }; return dg.context.structType(&llvm_types_buffer, 2, .False); }, - .byval => return dg.lowerType(fn_info.return_type), + .byval => return dg.lowerType(return_type), } }, // TODO investigate C ABI for other architectures - else => return dg.lowerType(fn_info.return_type), + else => return dg.lowerType(return_type), } }, .Win64 => return lowerWin64FnRetTy(dg, fn_info), .SysV => return lowerSystemVFnRetTy(dg, fn_info), .Stdcall => { - if (isScalar(mod, fn_info.return_type)) { - return dg.lowerType(fn_info.return_type); + if (isScalar(mod, return_type)) { + return dg.lowerType(return_type); } else { return dg.context.voidType(); } }, - else => return dg.lowerType(fn_info.return_type), + else => return dg.lowerType(return_type), } } -fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { +fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type { const mod = dg.module; - switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) { + const return_type = fn_info.return_type.toType(); + switch (x86_64_abi.classifyWindows(return_type, mod)) { .integer => { - if (isScalar(mod, fn_info.return_type)) { - return dg.lowerType(fn_info.return_type); + if (isScalar(mod, return_type)) { + return dg.lowerType(return_type); } else { - const abi_size = fn_info.return_type.abiSize(mod); + const abi_size = return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } }, .win_i128 => return dg.context.intType(64).vectorType(2), .memory => return dg.context.voidType(), - .sse => return dg.lowerType(fn_info.return_type), + .sse => return dg.lowerType(return_type), else => unreachable, } } -fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { +fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type { const mod = dg.module; - if (isScalar(mod, fn_info.return_type)) { - return dg.lowerType(fn_info.return_type); + const return_type = fn_info.return_type.toType(); + if (isScalar(mod, return_type)) { + return dg.lowerType(return_type); } - const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret); + const classes = x86_64_abi.classifySystemV(return_type, mod, .ret); if (classes[0] == .memory) { return dg.context.voidType(); } @@ -10748,7 +10747,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm } } if (classes[0] == .integer and classes[1] == 
.none) { - const abi_size = fn_info.return_type.abiSize(mod); + const abi_size = return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False); @@ -10756,7 +10755,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm const ParamTypeIterator = struct { dg: *DeclGen, - fn_info: Type.Payload.Function.Data, + fn_info: InternPool.Key.FuncType, zig_index: u32, llvm_index: u32, llvm_types_len: u32, @@ -10781,7 +10780,7 @@ const ParamTypeIterator = struct { if (it.zig_index >= it.fn_info.param_types.len) return null; const ty = it.fn_info.param_types[it.zig_index]; it.byval_attr = false; - return nextInner(it, ty); + return nextInner(it, ty.toType()); } /// `airCall` uses this instead of `next` so that it can take into account variadic functions. @@ -10793,7 +10792,7 @@ const ParamTypeIterator = struct { return nextInner(it, fg.typeOf(args[it.zig_index])); } } else { - return nextInner(it, it.fn_info.param_types[it.zig_index]); + return nextInner(it, it.fn_info.param_types[it.zig_index].toType()); } } @@ -11009,7 +11008,7 @@ const ParamTypeIterator = struct { } }; -fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator { +fn iterateParamTypes(dg: *DeclGen, fn_info: InternPool.Key.FuncType) ParamTypeIterator { return .{ .dg = dg, .fn_info = fn_info, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 32ea975b6425..777bb1cff99d 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1227,8 +1227,9 @@ pub const DeclGen = struct { }, .Fn => switch (repr) { .direct => { + const fn_info = mod.typeToFunc(ty).?; // TODO: Put this somewhere in Sema.zig - if (ty.fnIsVarArgs()) + if (fn_info.is_var_args) return self.fail("VarArgs functions are unsupported for SPIR-V", .{}); const param_ty_refs = try self.gpa.alloc(CacheRef, ty.fnParamLen()); @@ -1546,18 +1547,17 @@ pub const DeclGen = struct { assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ - .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()), + .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType(mod)), .id_result = decl_id, .function_control = .{}, // TODO: We can set inline here if the type requires it. 
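// Both operands above now come from the interned function type:
// prototype_id from resolveTypeId(decl.ty), and id_result_type from
// decl.ty.fnReturnType(mod), which reads the return type out of the
// interned func_type.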
.function_type = prototype_id, }); - const params = decl.ty.fnParamLen(); - var i: usize = 0; + const fn_info = mod.typeToFunc(decl.ty).?; - try self.args.ensureUnusedCapacity(self.gpa, params); - while (i < params) : (i += 1) { - const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i)); + try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len); + for (fn_info.param_types) |param_type| { + const param_type_id = try self.resolveTypeId(param_type.toType()); const arg_result_id = self.spv.allocId(); try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{ .id_result_type = param_type_id, @@ -3338,10 +3338,10 @@ pub const DeclGen = struct { .Pointer => return self.fail("cannot call function pointers", .{}), else => unreachable, }; - const fn_info = zig_fn_ty.fnInfo(); + const fn_info = mod.typeToFunc(zig_fn_ty).?; const return_type = fn_info.return_type; - const result_type_id = try self.resolveTypeId(return_type); + const result_type_id = try self.resolveTypeId(return_type.toType()); const result_id = self.spv.allocId(); const callee_id = try self.resolve(pl_op.operand); @@ -3368,11 +3368,11 @@ pub const DeclGen = struct { .id_ref_3 = params[0..n_params], }); - if (return_type.isNoReturn()) { + if (return_type == .noreturn_type) { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { + if (self.liveness.isUnused(inst) or !return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) { return null; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 452356de2cac..efaeebc62e7a 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1430,7 +1430,7 @@ pub fn updateDeclExports( .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, }; - const decl_cc = exported_decl.ty.fnCallingConvention(); + const decl_cc = exported_decl.ty.fnCallingConvention(mod); if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and self.base.options.link_libc) { diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index b9722f8c95bc..92ea2a15dcfe 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1022,7 +1022,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) const decl_name_with_null = decl_name[0 .. decl_name.len + 1]; try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len); - const fn_ret_type = decl.ty.fnReturnType(); + const fn_ret_type = decl.ty.fnReturnType(mod); const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod); if (fn_ret_has_bits) { dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram)); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index fbdcbd5a8e0a..da25753b9501 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -131,12 +131,12 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index) pub fn updateDeclExports( self: *SpirV, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { - const decl = module.declPtr(decl_index); - if (decl.val.tag() == .function and decl.ty.fnCallingConvention() == .Kernel) { + const decl = mod.declPtr(decl_index); + if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. 
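// getOrPut creates this decl's link entry on first export; later
// exports reuse the existing entry.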
const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { diff --git a/src/target.zig b/src/target.zig index c89f8ce92ccf..ac78d27c1ab6 100644 --- a/src/target.zig +++ b/src/target.zig @@ -649,3 +649,14 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 { else => "o", // Non-standard }; } + +pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: std.builtin.CallingConvention) bool { + return switch (cc) { + .Unspecified, .Async, .Inline => true, + // For now we want to authorize PTX kernel to use zig objects, even if + // we end up exposing the ABI. The goal is to experiment with more + // integrated CPU/GPU code. + .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64, + else => false, + }; +} diff --git a/src/type.zig b/src/type.zig index 32fa64a1ace9..daf8b305cc47 100644 --- a/src/type.zig +++ b/src/type.zig @@ -42,8 +42,6 @@ pub const Type = struct { .error_set_merged, => return .ErrorSet, - .function => return .Fn, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -66,6 +64,7 @@ pub const Type = struct { .union_type => return .Union, .opaque_type => return .Opaque, .enum_type => return .Enum, + .func_type => return .Fn, .simple_type => |s| switch (s) { .f16, .f32, @@ -344,53 +343,6 @@ pub const Type = struct { return true; }, - .function => { - if (b.zigTypeTag(mod) != .Fn) return false; - - const a_info = a.fnInfo(); - const b_info = b.fnInfo(); - - if (!a_info.return_type.isGenericPoison() and - !b_info.return_type.isGenericPoison() and - !eql(a_info.return_type, b_info.return_type, mod)) - return false; - - if (a_info.is_var_args != b_info.is_var_args) - return false; - - if (a_info.is_generic != b_info.is_generic) - return false; - - if (a_info.is_noinline != b_info.is_noinline) - return false; - - if (a_info.noalias_bits != b_info.noalias_bits) - return false; - - if (!a_info.cc_is_generic and a_info.cc != b_info.cc) - return false; - - if (!a_info.align_is_generic and a_info.alignment != b_info.alignment) - return false; - - if (a_info.param_types.len != b_info.param_types.len) - return false; - - for (a_info.param_types, 0..) |a_param_ty, i| { - const b_param_ty = b_info.param_types[i]; - if (a_info.comptime_params[i] != b_info.comptime_params[i]) - return false; - - if (a_param_ty.isGenericPoison()) continue; - if (b_param_ty.isGenericPoison()) continue; - - if (!eql(a_param_ty, b_param_ty, mod)) - return false; - } - - return true; - }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -501,32 +453,6 @@ pub const Type = struct { std.hash.autoHash(hasher, ies); }, - .function => { - std.hash.autoHash(hasher, std.builtin.TypeId.Fn); - - const fn_info = ty.fnInfo(); - if (!fn_info.return_type.isGenericPoison()) { - hashWithHasher(fn_info.return_type, hasher, mod); - } - if (!fn_info.align_is_generic) { - std.hash.autoHash(hasher, fn_info.alignment); - } - if (!fn_info.cc_is_generic) { - std.hash.autoHash(hasher, fn_info.cc); - } - std.hash.autoHash(hasher, fn_info.is_var_args); - std.hash.autoHash(hasher, fn_info.is_generic); - std.hash.autoHash(hasher, fn_info.is_noinline); - std.hash.autoHash(hasher, fn_info.noalias_bits); - - std.hash.autoHash(hasher, fn_info.param_types.len); - for (fn_info.param_types, 0..) 
|param_ty, i| { - std.hash.autoHash(hasher, fn_info.paramIsComptime(i)); - if (param_ty.isGenericPoison()) continue; - hashWithHasher(param_ty, hasher, mod); - } - }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, @@ -631,30 +557,6 @@ pub const Type = struct { }; }, - .function => { - const payload = self.castTag(.function).?.data; - const param_types = try allocator.alloc(Type, payload.param_types.len); - for (payload.param_types, 0..) |param_ty, i| { - param_types[i] = try param_ty.copy(allocator); - } - const other_comptime_params = payload.comptime_params[0..payload.param_types.len]; - const comptime_params = try allocator.dupe(bool, other_comptime_params); - return Tag.function.create(allocator, .{ - .return_type = try payload.return_type.copy(allocator), - .param_types = param_types, - .cc = payload.cc, - .alignment = payload.alignment, - .is_var_args = payload.is_var_args, - .is_generic = payload.is_generic, - .is_noinline = payload.is_noinline, - .comptime_params = comptime_params.ptr, - .align_is_generic = payload.align_is_generic, - .cc_is_generic = payload.cc_is_generic, - .section_is_generic = payload.section_is_generic, - .addrspace_is_generic = payload.addrspace_is_generic, - .noalias_bits = payload.noalias_bits, - }); - }, .pointer => { const payload = self.castTag(.pointer).?.data; const sent: ?Value = if (payload.sentinel) |some| @@ -766,32 +668,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .function => { - const payload = ty.castTag(.function).?.data; - try writer.writeAll("fn("); - for (payload.param_types, 0..) |param_type, i| { - if (i != 0) try writer.writeAll(", "); - try param_type.dump("", .{}, writer); - } - if (payload.is_var_args) { - if (payload.param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (payload.alignment != 0) { - try writer.print("align({d}) ", .{payload.alignment}); - } - if (payload.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(payload.cc)); - try writer.writeAll(") "); - } - ty = payload.return_type; - continue; - }, - .anyframe_T => { const return_type = ty.castTag(.anyframe_T).?.data; try writer.print("anyframe->", .{}); @@ -909,48 +785,6 @@ pub const Type = struct { try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); }, - .function => { - const fn_info = ty.fnInfo(); - if (fn_info.is_noinline) { - try writer.writeAll("noinline "); - } - try writer.writeAll("fn("); - for (fn_info.param_types, 0..) 
|param_ty, i| { - if (i != 0) try writer.writeAll(", "); - if (fn_info.paramIsComptime(i)) { - try writer.writeAll("comptime "); - } - if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) { - try writer.writeAll("noalias "); - }; - if (param_ty.isGenericPoison()) { - try writer.writeAll("anytype"); - } else { - try print(param_ty, writer, mod); - } - } - if (fn_info.is_var_args) { - if (fn_info.param_types.len != 0) { - try writer.writeAll(", "); - } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (fn_info.alignment != 0) { - try writer.print("align({d}) ", .{fn_info.alignment}); - } - if (fn_info.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(fn_info.cc)); - try writer.writeAll(") "); - } - if (fn_info.return_type.isGenericPoison()) { - try writer.writeAll("anytype"); - } else { - try print(fn_info.return_type, writer, mod); - } - }, - .error_union => { const error_union = ty.castTag(.error_union).?.data; try print(error_union.error_set, writer, mod); @@ -1158,6 +992,48 @@ pub const Type = struct { const decl = mod.declPtr(enum_type.decl); try decl.renderFullyQualifiedName(mod, writer); }, + .func_type => |fn_info| { + if (fn_info.is_noinline) { + try writer.writeAll("noinline "); + } + try writer.writeAll("fn("); + for (fn_info.param_types, 0..) |param_ty, i| { + if (i != 0) try writer.writeAll(", "); + if (std.math.cast(u5, i)) |index| { + if (fn_info.paramIsComptime(index)) { + try writer.writeAll("comptime "); + } + if (fn_info.paramIsNoalias(index)) { + try writer.writeAll("noalias "); + } + } + if (param_ty == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(param_ty.toType(), writer, mod); + } + } + if (fn_info.is_var_args) { + if (fn_info.param_types.len != 0) { + try writer.writeAll(", "); + } + try writer.writeAll("..."); + } + try writer.writeAll(") "); + if (fn_info.alignment != 0) { + try writer.print("align({d}) ", .{fn_info.alignment}); + } + if (fn_info.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(fn_info.cc)); + try writer.writeAll(") "); + } + if (fn_info.return_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(fn_info.return_type.toType(), writer, mod); + } + }, // values, not types .undef => unreachable, @@ -1174,6 +1050,11 @@ pub const Type = struct { } } + pub fn toIntern(ty: Type) InternPool.Index { + assert(ty.ip_index != .none); + return ty.ip_index; + } + pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { @@ -1223,7 +1104,7 @@ pub const Type = struct { if (ignore_comptime_only) { return true; } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { - return !ty.childType(mod).fnInfo().is_generic; + return !mod.typeToFunc(ty.childType(mod)).?.is_generic; } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(ty)); } else { @@ -1231,12 +1112,6 @@ pub const Type = struct { } }, - // These are false because they are comptime-only types. - // These are function *bodies*, not pointers. - // Special exceptions have to be made when emitting functions due to - // this returning false. - .function => return false, - .optional => { const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { @@ -1262,7 +1137,7 @@ pub const Type = struct { // to comptime-only types do not, with the exception of function pointers. 
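// A generic function body remains comptime-only even behind a pointer,
// hence the is_generic check below.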
if (ignore_comptime_only) return true; const child_ty = ptr_type.elem_type.toType(); - if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic; + if (child_ty.zigTypeTag(mod) == .Fn) return !mod.typeToFunc(child_ty).?.is_generic; if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); return !comptimeOnly(ty, mod); }, @@ -1293,6 +1168,13 @@ pub const Type = struct { } }, .error_union_type => @panic("TODO"), + + // These are function *bodies*, not pointers. + // They return false here because they are comptime-only types. + // Special exceptions have to be made when emitting functions due to + // this returning false. + .func_type => false, + .simple_type => |t| switch (t) { .f16, .f32, @@ -1436,8 +1318,6 @@ pub const Type = struct { .error_set_single, .error_set_inferred, .error_set_merged, - // These are function bodies, not function pointers. - .function, .error_union, .anyframe_T, => false, @@ -1448,12 +1328,21 @@ pub const Type = struct { .optional => ty.isPtrLikeOptional(mod), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => true, - .ptr_type => true, + .int_type, + .ptr_type, + .vector_type, + => true, + + .error_union_type, + .anon_struct_type, + .opaque_type, + // These are function bodies, not function pointers. + .func_type, + => false, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), - .vector_type => true, .opt_type => |child| child.toType().isPtrLikeOptional(mod), - .error_union_type => false, + .simple_type => |t| switch (t) { .f16, .f32, @@ -1509,12 +1398,10 @@ pub const Type = struct { }; return struct_obj.layout != .Auto; }, - .anon_struct_type => false, .union_type => |union_type| switch (union_type.runtime_tag) { .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, .tagged => false, }, - .opaque_type => false, .enum_type => |enum_type| switch (enum_type.tag_mode) { .auto => false, .explicit, .nonexhaustive => true, @@ -1546,7 +1433,7 @@ pub const Type = struct { pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool { switch (ty.zigTypeTag(mod)) { .Fn => { - const fn_info = ty.fnInfo(); + const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; switch (fn_info.cc) { @@ -1555,7 +1442,7 @@ pub const Type = struct { .Inline => return false, else => {}, } - if (fn_info.return_type.comptimeOnly(mod)) return false; + if (fn_info.return_type.toType().comptimeOnly(mod)) return false; return true; }, else => return ty.hasRuntimeBits(mod), @@ -1707,13 +1594,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, .none => switch (ty.tag()) { - // represents machine code; not a pointer - .function => { - const alignment = ty.castTag(.function).?.data.alignment; - if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; - return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; - }, - .pointer, .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, @@ -1753,6 +1633,13 @@ pub const Type = struct { .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), + // represents machine code; not a pointer + .func_type => |func_type| { + const alignment = @intCast(u32, func_type.alignment); + if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; + return AbiAlignmentAdvanced{ .scalar = 
target_util.defaultFunctionAlignment(target) }; + }, + .simple_type => |t| switch (t) { .bool, .atomic_order, @@ -2086,7 +1973,6 @@ pub const Type = struct { .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, .none => switch (ty.tag()) { - .function => unreachable, // represents machine code; not a pointer .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, @@ -2187,6 +2073,7 @@ pub const Type = struct { .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), .error_union_type => @panic("TODO"), + .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { .bool, .atomic_order, @@ -2408,7 +2295,6 @@ pub const Type = struct { switch (ty.ip_index) { .none => switch (ty.tag()) { - .function => unreachable, // represents machine code; not a pointer .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, @@ -2453,6 +2339,7 @@ pub const Type = struct { }, .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), + .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { .f16 => return 16, .f32 => return 32, @@ -3271,6 +3158,7 @@ pub const Type = struct { .opt_type => unreachable, .error_union_type => unreachable, + .func_type => unreachable, .simple_type => unreachable, // handled via Index enum tag above .union_type => unreachable, @@ -3356,54 +3244,22 @@ pub const Type = struct { }; } - /// Asserts the type is a function. - pub fn fnParamLen(self: Type) usize { - return self.castTag(.function).?.data.param_types.len; - } - - /// Asserts the type is a function. The length of the slice must be at least the length - /// given by `fnParamLen`. - pub fn fnParamTypes(self: Type, types: []Type) void { - const payload = self.castTag(.function).?.data; - @memcpy(types[0..payload.param_types.len], payload.param_types); - } - - /// Asserts the type is a function. - pub fn fnParamType(self: Type, index: usize) Type { - switch (self.tag()) { - .function => { - const payload = self.castTag(.function).?.data; - return payload.param_types[index]; - }, - - else => unreachable, - } - } - /// Asserts the type is a function or a function pointer. - pub fn fnReturnType(ty: Type) Type { - const fn_ty = switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.pointee_type, - .function => ty, - else => unreachable, - }; - return fn_ty.castTag(.function).?.data.return_type; + pub fn fnReturnType(ty: Type, mod: *Module) Type { + return fnReturnTypeIp(ty, mod.intern_pool); } - /// Asserts the type is a function. - pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention { - return self.castTag(.function).?.data.cc; + pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type { + return switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type, + .func_type => |func_type| func_type.return_type, + else => unreachable, + }.toType(); } /// Asserts the type is a function. - pub fn fnCallingConventionAllowsZigTypes(target: Target, cc: std.builtin.CallingConvention) bool { - return switch (cc) { - .Unspecified, .Async, .Inline => true, - // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. - // The goal is to experiment with more integrated CPU/GPU code. 
- .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64, - else => false, - }; + pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { + return mod.intern_pool.indexToKey(ty.ip_index).func_type.cc; } pub fn isValidParamType(self: Type, mod: *const Module) bool { @@ -3421,12 +3277,8 @@ pub const Type = struct { } /// Asserts the type is a function. - pub fn fnIsVarArgs(self: Type) bool { - return self.castTag(.function).?.data.is_var_args; - } - - pub fn fnInfo(ty: Type) Payload.Function.Data { - return ty.castTag(.function).?.data; + pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { + return mod.intern_pool.indexToKey(ty.ip_index).func_type.is_var_args; } pub fn isNumeric(ty: Type, mod: *const Module) bool { @@ -3474,7 +3326,6 @@ pub const Type = struct { .error_set_single, .error_set, .error_set_merged, - .function, .error_set_inferred, .anyframe_T, .pointer, @@ -3500,7 +3351,12 @@ pub const Type = struct { return null; } }, - .ptr_type => return null, + + .ptr_type, + .error_union_type, + .func_type, + => return null, + .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); @@ -3514,13 +3370,13 @@ pub const Type = struct { return null; }, .opt_type => |child| { - if (child.toType().isNoReturn()) { - return Value.null; + if (child == .noreturn_type) { + return try mod.nullValue(ty); } else { return null; } }, - .error_union_type => return null, + .simple_type => |t| switch (t) { .f16, .f32, @@ -3682,9 +3538,6 @@ pub const Type = struct { .error_set_merged, => false, - // These are function bodies, not function pointers. - .function => true, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, @@ -3721,6 +3574,9 @@ pub const Type = struct { .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), .opt_type => |child| child.toType().comptimeOnly(mod), .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod), + // These are function bodies, not function pointers. + .func_type => true, + .simple_type => |t| switch (t) { .f16, .f32, @@ -4367,6 +4223,10 @@ pub const Type = struct { return ty.ip_index == .generic_poison_type; } + pub fn isBoundFn(ty: Type) bool { + return ty.ip_index == .none and ty.tag() == .bound_fn; + } + /// This enum does not directly correspond to `std.builtin.TypeId` because /// it has extra enum tags in it, as a way of using less memory. For example, /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types @@ -4383,7 +4243,6 @@ pub const Type = struct { // After this, the tag requires a payload. pointer, - function, optional, error_union, anyframe_T, @@ -4411,7 +4270,6 @@ pub const Type = struct { .error_set_merged => Payload.ErrorSetMerged, .pointer => Payload.Pointer, - .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, }; @@ -4508,36 +4366,6 @@ pub const Type = struct { data: u16, }; - pub const Function = struct { - pub const base_tag = Tag.function; - - base: Payload = Payload{ .tag = base_tag }, - data: Data, - - // TODO look into optimizing this memory to take fewer bytes - pub const Data = struct { - param_types: []Type, - comptime_params: [*]bool, - return_type: Type, - /// If zero use default target function code alignment. 
- alignment: u32, - noalias_bits: u32, - cc: std.builtin.CallingConvention, - is_var_args: bool, - is_generic: bool, - is_noinline: bool, - align_is_generic: bool, - cc_is_generic: bool, - section_is_generic: bool, - addrspace_is_generic: bool, - - pub fn paramIsComptime(self: @This(), i: usize) bool { - assert(i < self.param_types.len); - return self.comptime_params[i]; - } - }; - }; - pub const ErrorSet = struct { pub const base_tag = Tag.error_set; diff --git a/src/value.zig b/src/value.zig index 50e3fc80610d..35d144f91227 100644 --- a/src/value.zig +++ b/src/value.zig @@ -602,6 +602,11 @@ pub const Value = struct { return result; } + pub fn toIntern(val: Value) InternPool.Index { + assert(val.ip_index != .none); + return val.ip_index; + } + /// Asserts that the value is representable as a type. pub fn toType(self: Value) Type { if (self.ip_index != .none) return self.ip_index.toType(); From f21ca3da190c8c64fd99c700f0840af59792b6b2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 18 May 2023 19:05:46 -0700 Subject: [PATCH 069/205] compiler: move `anyframe->T` to InternPool Also I moved `anyframe` from being represented by `SimpleType` to being represented by the `none` tag of `anyframe_type` because most code wants to handle these two types together. --- src/InternPool.zig | 34 +++++++++-- src/Module.zig | 6 ++ src/Sema.zig | 24 ++++---- src/type.zig | 142 ++++++++++++++++++--------------------------- 4 files changed, 101 insertions(+), 105 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index d4bfe5a24431..bf48aeda8454 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -132,6 +132,9 @@ pub const Key = union(enum) { array_type: ArrayType, vector_type: VectorType, opt_type: Index, + /// `anyframe->T`. The payload is the child type, which may be `none` to indicate + /// `anyframe`. + anyframe_type: Index, error_union_type: struct { error_set_type: Index, payload_type: Index, @@ -503,6 +506,7 @@ pub const Key = union(enum) { .array_type, .vector_type, .opt_type, + .anyframe_type, .error_union_type, .simple_type, .simple_value, @@ -597,7 +601,11 @@ pub const Key = union(enum) { }, .opt_type => |a_info| { const b_info = b.opt_type; - return std.meta.eql(a_info, b_info); + return a_info == b_info; + }, + .anyframe_type => |a_info| { + const b_info = b.anyframe_type; + return a_info == b_info; }, .error_union_type => |a_info| { const b_info = b.error_union_type; @@ -752,6 +760,7 @@ pub const Key = union(enum) { .array_type, .vector_type, .opt_type, + .anyframe_type, .error_union_type, .simple_type, .struct_type, @@ -1037,7 +1046,7 @@ pub const static_keys = [_]Key{ .{ .simple_type = .comptime_int }, .{ .simple_type = .comptime_float }, .{ .simple_type = .noreturn }, - .{ .simple_type = .@"anyframe" }, + .{ .anyframe_type = .none }, .{ .simple_type = .null }, .{ .simple_type = .undefined }, .{ .simple_type = .enum_literal }, @@ -1203,6 +1212,10 @@ pub const Tag = enum(u8) { /// An optional type. /// data is the child type. type_optional, + /// The type `anyframe->T`. + /// data is the child type. + /// If the child type is `none`, the type is `anyframe`. + type_anyframe, /// An error union type. /// data is payload to ErrorUnion. 
type_error_union, @@ -1421,7 +1434,6 @@ pub const SimpleType = enum(u32) { comptime_int, comptime_float, noreturn, - @"anyframe", null, undefined, enum_literal, @@ -1781,6 +1793,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }, .type_optional => .{ .opt_type = @intToEnum(Index, data) }, + .type_anyframe => .{ .anyframe_type = @intToEnum(Index, data) }, .type_error_union => @panic("TODO"), @@ -2144,10 +2157,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), }); }, - .opt_type => |opt_type| { + .opt_type => |payload_type| { + assert(payload_type != .none); ip.items.appendAssumeCapacity(.{ .tag = .type_optional, - .data = @enumToInt(opt_type), + .data = @enumToInt(payload_type), + }); + }, + .anyframe_type => |payload_type| { + // payload_type might be none, indicating the type is `anyframe`. + ip.items.appendAssumeCapacity(.{ + .tag = .type_anyframe, + .data = @enumToInt(payload_type), }); }, .error_union_type => |error_union_type| { @@ -3063,7 +3084,7 @@ pub fn childType(ip: InternPool, i: Index) Index { .ptr_type => |ptr_type| ptr_type.elem_type, .vector_type => |vector_type| vector_type.child, .array_type => |array_type| array_type.child, - .opt_type => |child| child, + .opt_type, .anyframe_type => |child| child, else => unreachable, }; } @@ -3231,6 +3252,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_pointer => @sizeOf(Pointer), .type_slice => 0, .type_optional => 0, + .type_anyframe => 0, .type_error_union => @sizeOf(ErrorUnion), .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), .type_enum_auto => @sizeOf(EnumAuto), diff --git a/src/Module.zig b/src/Module.zig index c8e676f813b5..0a063a8ddc9d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6869,6 +6869,12 @@ pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Typ return (try intern(mod, .{ .func_type = info })).toType(); } +/// Use this for `anyframe->T` only. +/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly. +pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type { + return (try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })).toType(); +} + /// Supports optionals in addition to pointers. 
pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { diff --git a/src/Sema.zig b/src/Sema.zig index eb8dc5a633f2..c855c5e18851 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8042,9 +8042,10 @@ fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro if (true) { return sema.failWithUseOfAsync(block, inst_data.src()); } + const mod = sema.mod; const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node }; const return_type = try sema.resolveType(block, operand_src, inst_data.operand); - const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type); + const anyframe_type = try mod.anyframeType(return_type); return sema.addType(anyframe_type); } @@ -31626,10 +31627,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return sema.resolveTypeRequiresComptime(child_ty); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, @@ -31641,6 +31638,10 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return sema.resolveTypeRequiresComptime(child_ty); } }, + .anyframe_type => |child| { + if (child == .none) return false; + return sema.resolveTypeRequiresComptime(child.toType()); + }, .array_type => |array_type| return sema.resolveTypeRequiresComptime(array_type.child.toType()), .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), @@ -31669,7 +31670,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .bool, .void, .anyerror, - .@"anyframe", .noreturn, .generic_poison, .var_args_param, @@ -33054,7 +33054,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set_merged, .error_union, .error_set_inferred, - .anyframe_T, .pointer, => return null, @@ -33083,6 +33082,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .ptr_type, .error_union_type, .func_type, + .anyframe_type, => null, .array_type => |array_type| { @@ -33130,7 +33130,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .anyerror, .comptime_int, .comptime_float, - .@"anyframe", .enum_literal, .atomic_order, .atomic_rmw_op, @@ -33688,10 +33687,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return sema.typeRequiresComptime(child_ty); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return false, @@ -33703,6 +33698,10 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return sema.typeRequiresComptime(child_ty); } }, + .anyframe_type => |child| { + if (child == .none) return false; + return sema.typeRequiresComptime(child.toType()); + }, .array_type => |array_type| return sema.typeRequiresComptime(array_type.child.toType()), .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()), .opt_type => |child| return sema.typeRequiresComptime(child.toType()), @@ -33733,7 +33732,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .bool, .void, .anyerror, - .@"anyframe", 
.noreturn, .generic_poison, .atomic_order, diff --git a/src/type.zig b/src/type.zig index daf8b305cc47..6986f2fc0790 100644 --- a/src/type.zig +++ b/src/type.zig @@ -50,21 +50,20 @@ pub const Type = struct { .optional => return .Optional, .error_union => return .ErrorUnion, - - .anyframe_T => return .AnyFrame, }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => return .Int, - .ptr_type => return .Pointer, - .array_type => return .Array, - .vector_type => return .Vector, - .opt_type => return .Optional, - .error_union_type => return .ErrorUnion, - .struct_type, .anon_struct_type => return .Struct, - .union_type => return .Union, - .opaque_type => return .Opaque, - .enum_type => return .Enum, - .func_type => return .Fn, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => .Int, + .ptr_type => .Pointer, + .array_type => .Array, + .vector_type => .Vector, + .opt_type => .Optional, + .error_union_type => .ErrorUnion, + .struct_type, .anon_struct_type => .Struct, + .union_type => .Union, + .opaque_type => .Opaque, + .enum_type => .Enum, + .func_type => .Fn, + .anyframe_type => .AnyFrame, .simple_type => |s| switch (s) { .f16, .f32, @@ -72,7 +71,7 @@ pub const Type = struct { .f80, .f128, .c_longdouble, - => return .Float, + => .Float, .usize, .isize, @@ -85,20 +84,19 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - => return .Int, - - .anyopaque => return .Opaque, - .bool => return .Bool, - .void => return .Void, - .type => return .Type, - .anyerror => return .ErrorSet, - .comptime_int => return .ComptimeInt, - .comptime_float => return .ComptimeFloat, - .noreturn => return .NoReturn, - .@"anyframe" => return .AnyFrame, - .null => return .Null, - .undefined => return .Undefined, - .enum_literal => return .EnumLiteral, + => .Int, + + .anyopaque => .Opaque, + .bool => .Bool, + .void => .Void, + .type => .Type, + .anyerror => .ErrorSet, + .comptime_int => .ComptimeInt, + .comptime_float => .ComptimeFloat, + .noreturn => .NoReturn, + .null => .Null, + .undefined => .Undefined, + .enum_literal => .EnumLiteral, .atomic_order, .atomic_rmw_op, @@ -107,14 +105,14 @@ pub const Type = struct { .float_mode, .reduce_op, .call_modifier, - => return .Enum, + => .Enum, .prefetch_options, .export_options, .extern_options, - => return .Struct, + => .Struct, - .type_info => return .Union, + .type_info => .Union, .generic_poison => return error.GenericPoison, .var_args_param => unreachable, @@ -408,11 +406,6 @@ pub const Type = struct { return true; }, - - .anyframe_T => { - if (b.zigTypeTag(mod) != .AnyFrame) return false; - return a.elemType2(mod).eql(b.elemType2(mod), mod); - }, } } @@ -488,11 +481,6 @@ pub const Type = struct { const payload_ty = ty.errorUnionPayload(); hashWithHasher(payload_ty, hasher, mod); }, - - .anyframe_T => { - std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - hashWithHasher(ty.childType(mod), hasher, mod); - }, } } @@ -542,9 +530,7 @@ pub const Type = struct { .inferred_alloc_mut, => unreachable, - .optional, - .anyframe_T, - => { + .optional => { const payload = self.cast(Payload.ElemType).?; const new_payload = try allocator.create(Payload.ElemType); new_payload.* = .{ @@ -668,12 +654,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .anyframe_T => { - const return_type = ty.castTag(.anyframe_T).?.data; - try writer.print("anyframe->", .{}); - ty = return_type; - continue; - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); @@ 
-838,11 +818,6 @@ pub const Type = struct { try writer.writeByte('?'); try print(child_type, writer, mod); }, - .anyframe_T => { - const return_type = ty.castTag(.anyframe_T).?.data; - try writer.print("anyframe->", .{}); - try print(return_type, writer, mod); - }, .error_set => { const names = ty.castTag(.error_set).?.data.names.keys(); try writer.writeAll("error{"); @@ -1034,6 +1009,11 @@ pub const Type = struct { try print(fn_info.return_type.toType(), writer, mod); } }, + .anyframe_type => |child| { + if (child == .none) return writer.writeAll("anyframe"); + try writer.writeAll("anyframe->"); + return print(child.toType(), writer, mod); + }, // values, not types .undef => unreachable, @@ -1098,9 +1078,7 @@ pub const Type = struct { // Pointers to zero-bit types still have a runtime address; however, pointers // to comptime-only types do not, with the exception of function pointers. - .anyframe_T, - .pointer, - => { + .pointer => { if (ignore_comptime_only) { return true; } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { @@ -1141,6 +1119,7 @@ pub const Type = struct { if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); return !comptimeOnly(ty, mod); }, + .anyframe_type => true, .array_type => |array_type| { if (array_type.sentinel != .none) { return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); @@ -1195,7 +1174,6 @@ pub const Type = struct { .c_longdouble, .bool, .anyerror, - .@"anyframe", .anyopaque, .atomic_order, .atomic_rmw_op, @@ -1319,7 +1297,6 @@ pub const Type = struct { .error_set_inferred, .error_set_merged, .error_union, - .anyframe_T, => false, .inferred_alloc_mut => unreachable, @@ -1336,6 +1313,7 @@ pub const Type = struct { .error_union_type, .anon_struct_type, .opaque_type, + .anyframe_type, // These are function bodies, not function pointers. 
.func_type, => false, @@ -1366,7 +1344,6 @@ pub const Type = struct { => true, .anyerror, - .@"anyframe", .anyopaque, .atomic_order, .atomic_rmw_op, @@ -1594,9 +1571,7 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, .none => switch (ty.tag()) { - .pointer, - .anyframe_T, - => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + .pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, // TODO revisit this when we have the concept of the error tag type .error_set_inferred, @@ -1617,7 +1592,7 @@ pub const Type = struct { if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; }, - .ptr_type => { + .ptr_type, .anyframe_type => { return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; }, .array_type => |array_type| { @@ -1657,7 +1632,6 @@ pub const Type = struct { .isize, .export_options, .extern_options, - .@"anyframe", => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, @@ -1976,8 +1950,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .pointer => switch (ty.castTag(.pointer).?.data.size) { .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, @@ -2039,6 +2011,8 @@ pub const Type = struct { .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, else => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) }, }, + .anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, + .array_type => |array_type| { const len = array_type.len + @boolToInt(array_type.sentinel != .none); switch (try array_type.child.toType().abiSizeAdvanced(mod, strat)) { @@ -2102,7 +2076,6 @@ pub const Type = struct { .usize, .isize, - .@"anyframe", => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, @@ -2298,8 +2271,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .anyframe_T => return target.ptrBitWidth(), - .pointer => switch (ty.castTag(.pointer).?.data.size) { .Slice => return target.ptrBitWidth() * 2, else => return target.ptrBitWidth(), @@ -2323,6 +2294,8 @@ pub const Type = struct { .Slice => return target.ptrBitWidth() * 2, else => return target.ptrBitWidth() * 2, }, + .anyframe_type => return target.ptrBitWidth(), + .array_type => |array_type| { const len = array_type.len + @boolToInt(array_type.sentinel != .none); if (len == 0) return 0; @@ -2349,7 +2322,6 @@ pub const Type = struct { .usize, .isize, - .@"anyframe", => return target.ptrBitWidth(), .c_char => return target.c_type_bit_size(.char), @@ -2777,8 +2749,6 @@ pub const Type = struct { }, .optional => ty.castTag(.optional).?.data.childType(mod), - .anyframe_T => ty.castTag(.anyframe_T).?.data, - else => unreachable, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -2786,6 +2756,10 @@ pub const Type = struct { .One => ptr_type.elem_type.toType().shallowElemType(mod), .Many, .C, .Slice => ptr_type.elem_type.toType(), }, + .anyframe_type 
=> |child| { + assert(child != .none); + return child.toType(); + }, .vector_type => |vector_type| vector_type.child.toType(), .array_type => |array_type| array_type.child.toType(), .opt_type => |child| mod.intern_pool.childType(child).toType(), @@ -3154,6 +3128,7 @@ pub const Type = struct { .anon_struct_type => unreachable, .ptr_type => unreachable, + .anyframe_type => unreachable, .array_type => unreachable, .opt_type => unreachable, @@ -3327,7 +3302,6 @@ pub const Type = struct { .error_set, .error_set_merged, .error_set_inferred, - .anyframe_T, .pointer, => return null, @@ -3355,6 +3329,7 @@ pub const Type = struct { .ptr_type, .error_union_type, .func_type, + .anyframe_type, => return null, .array_type => |array_type| { @@ -3401,7 +3376,6 @@ pub const Type = struct { .anyerror, .comptime_int, .comptime_float, - .@"anyframe", .enum_literal, .atomic_order, .atomic_rmw_op, @@ -3555,10 +3529,6 @@ pub const Type = struct { }, .error_union => return ty.errorUnionPayload().comptimeOnly(mod), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return child_ty.comptimeOnly(mod); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, @@ -3570,6 +3540,10 @@ pub const Type = struct { return child_ty.comptimeOnly(mod); } }, + .anyframe_type => |child| { + if (child == .none) return false; + return child.toType().comptimeOnly(mod); + }, .array_type => |array_type| array_type.child.toType().comptimeOnly(mod), .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), .opt_type => |child| child.toType().comptimeOnly(mod), @@ -3599,7 +3573,6 @@ pub const Type = struct { .bool, .void, .anyerror, - .@"anyframe", .noreturn, .generic_poison, .atomic_order, @@ -4245,7 +4218,6 @@ pub const Type = struct { pointer, optional, error_union, - anyframe_T, error_set, error_set_single, /// The type is the inferred error set of a specific function. @@ -4261,9 +4233,7 @@ pub const Type = struct { .inferred_alloc_mut, => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), - .optional, - .anyframe_T, - => Payload.ElemType, + .optional => Payload.ElemType, .error_set => Payload.ErrorSet, .error_set_inferred => Payload.ErrorSetInferred, From 607737d841bc2279cbe5fee68a0a546b9a5a802e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 18 May 2023 20:02:10 -0700 Subject: [PATCH 070/205] compiler: eliminate legacy Type.Tag.optional Now optional types are only stored in InternPool. --- src/Sema.zig | 50 ++--------------- src/type.zig | 153 +++++---------------------------------------------- 2 files changed, 19 insertions(+), 184 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index c855c5e18851..8492fd441f08 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -18589,12 +18589,10 @@ fn fieldType( return sema.addType(field.ty); }, .Optional => { - if (cur_ty.castTag(.optional)) |some| { - // Struct/array init through optional requires the child type to not be a pointer. - // If the child of .optional is a pointer it'll error on the next loop. - cur_ty = some.data; - continue; - } + // Struct/array init through optional requires the child type to not be a pointer. + // If the child of .optional is a pointer it'll error on the next loop. 
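+            // With optionals interned, the child type is read off the InternPool
+            // key; a minimal sketch of that lookup (assuming `cur_ty` is some `?T`):
+            //   const key = mod.intern_pool.indexToKey(cur_ty.ip_index);
+            //   cur_ty = key.opt_type.toType();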
+ cur_ty = mod.intern_pool.indexToKey(cur_ty.ip_index).opt_type.toType(); + continue; }, .ErrorUnion => { cur_ty = cur_ty.errorUnionPayload(); @@ -20390,7 +20388,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A ptr_info.@"align" = dest_align; var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); if (ptr_ty.zigTypeTag(mod) == .Optional) { - dest_ty = try Type.Tag.optional.create(sema.arena, dest_ty); + dest_ty = try mod.optionalType(dest_ty.toIntern()); } if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| { @@ -31622,10 +31620,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, - .optional => { - return sema.resolveTypeRequiresComptime(ty.optionalChild(mod)); - }, - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -33057,15 +33051,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .pointer, => return null, - .optional => { - const child_ty = ty.optionalChild(mod); - if (child_ty.isNoReturn()) { - return Value.null; - } else { - return null; - } - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -33628,26 +33613,6 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .optional => { - const child_type = ty.optionalChild(mod); - if (child_type.zigTypeTag(mod) != .Pointer) return null; - - const info = child_type.ptrInfo(mod); - switch (info.size) { - .Slice, .C => return null, - .Many, .One => { - if (info.@"allowzero") return null; - - // optionals of zero sized types behave like bools, not pointers - if ((try sema.typeHasOnePossibleValue(child_type)) != null) { - return null; - } - - return child_type; - }, - } - }, - else => return null, } } @@ -33682,10 +33647,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, - .optional => { - return sema.typeRequiresComptime(ty.optionalChild(mod)); - }, - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -33705,6 +33666,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .array_type => |array_type| return sema.typeRequiresComptime(array_type.child.toType()), .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()), .opt_type => |child| return sema.typeRequiresComptime(child.toType()), + .error_union_type => |error_union_type| { return sema.typeRequiresComptime(error_union_type.payload_type.toType()); }, diff --git a/src/type.zig b/src/type.zig index 6986f2fc0790..735d532c4694 100644 --- a/src/type.zig +++ b/src/type.zig @@ -47,8 +47,6 @@ pub const Type = struct { .inferred_alloc_mut, => return .Pointer, - .optional => return .Optional, - .error_union => return .ErrorUnion, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -283,10 +281,6 @@ pub const Type = struct { return switch (ty.ip_index) { .none => switch (ty.tag()) { .pointer => ty.castTag(.pointer).?.data, - .optional => b: { - const child_type = ty.optionalChild(mod); - break :b child_type.ptrInfo(mod); - }, else => unreachable, }, @@ -387,12 +381,6 @@ pub const Type = struct { return true; }, - .optional => { - if (b.zigTypeTag(mod) != .Optional) return false; - - return a.optionalChild(mod).eql(b.optionalChild(mod), mod); - }, - .error_union => { if (b.zigTypeTag(mod) 
!= .ErrorUnion) return false; @@ -466,12 +454,6 @@ pub const Type = struct { std.hash.autoHash(hasher, info.size); }, - .optional => { - std.hash.autoHash(hasher, std.builtin.TypeId.Optional); - - hashWithHasher(ty.optionalChild(mod), hasher, mod); - }, - .error_union => { std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion); @@ -530,19 +512,6 @@ pub const Type = struct { .inferred_alloc_mut, => unreachable, - .optional => { - const payload = self.cast(Payload.ElemType).?; - const new_payload = try allocator.create(Payload.ElemType); - new_payload.* = .{ - .base = .{ .tag = payload.base.tag }, - .data = try payload.data.copy(allocator), - }; - return Type{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .pointer => { const payload = self.castTag(.pointer).?.data; const sent: ?Value = if (payload.sentinel) |some| @@ -654,13 +623,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .optional => { - const child_type = ty.castTag(.optional).?.data; - try writer.writeByte('?'); - ty = child_type; - continue; - }, - .pointer => { const payload = ty.castTag(.pointer).?.data; if (payload.sentinel) |some| switch (payload.size) { @@ -813,11 +775,6 @@ pub const Type = struct { try print(info.pointee_type, writer, mod); }, - .optional => { - const child_type = ty.castTag(.optional).?.data; - try writer.writeByte('?'); - try print(child_type, writer, mod); - }, .error_set => { const names = ty.castTag(.error_set).?.data.names.keys(); try writer.writeAll("error{"); @@ -911,8 +868,7 @@ pub const Type = struct { }, .opt_type => |child| { try writer.writeByte('?'); - try print(child.toType(), writer, mod); - return; + return print(child.toType(), writer, mod); }, .error_union_type => |error_union_type| { try print(error_union_type.error_set_type.toType(), writer, mod); @@ -1090,21 +1046,6 @@ pub const Type = struct { } }, - .optional => { - const child_ty = ty.optionalChild(mod); - if (child_ty.isNoReturn()) { - // Then the optional is comptime-known to be null. 
- return false; - } - if (ignore_comptime_only) { - return true; - } else if (strat == .sema) { - return !(try strat.sema.typeRequiresComptime(child_ty)); - } else { - return !comptimeOnly(child_ty, mod); - } - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -1301,8 +1242,6 @@ pub const Type = struct { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - - .optional => ty.isPtrLikeOptional(mod), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type, @@ -1319,7 +1258,7 @@ pub const Type = struct { => false, .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), - .opt_type => |child| child.toType().isPtrLikeOptional(mod), + .opt_type => ty.isPtrLikeOptional(mod), .simple_type => |t| switch (t) { .f16, @@ -1484,7 +1423,6 @@ pub const Type = struct { return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } }, - .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), else => unreachable, }, @@ -1510,11 +1448,6 @@ pub const Type = struct { .none => switch (ty.tag()) { .pointer => ty.castTag(.pointer).?.data.@"addrspace", - .optional => { - const child_type = ty.optionalChild(mod); - return child_type.ptrAddressSpace(mod); - }, - else => unreachable, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -1580,7 +1513,6 @@ pub const Type = struct { .error_set_merged, => return AbiAlignmentAdvanced{ .scalar = 2 }, - .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), .inferred_alloc_const, @@ -1962,8 +1894,6 @@ pub const Type = struct { .error_set_single, => return AbiSizeAdvanced{ .scalar = 2 }, - .optional => return ty.abiSizeAdvancedOptional(mod, strat), - .error_union => { // This code needs to be kept in sync with the equivalent switch prong // in abiAlignmentAdvanced. @@ -2282,7 +2212,7 @@ pub const Type = struct { .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type - .optional, .error_union => { + .error_union => { // Optionals and error unions are not packed so their bitsize // includes padding bits. return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; @@ -2310,7 +2240,11 @@ pub const Type = struct { const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); return elem_bit_size * vector_type.len; }, - .opt_type => @panic("TODO"), + .opt_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. 
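+                // So the bit size falls back to byte-based ABI size * 8; e.g. a ?u8
+                // (1 payload byte + 1 flag byte) reports 16 bits rather than 9.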
+ return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, .error_union_type => @panic("TODO"), .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { @@ -2499,7 +2433,6 @@ pub const Type = struct { } pub const SlicePtrFieldTypeBuffer = union { - elem_type: Payload.ElemType, pointer: Payload.Pointer, }; @@ -2600,16 +2533,6 @@ pub const Type = struct { .One, .Many, .C => return true, }, - .optional => { - const child_type = ty.optionalChild(mod); - if (child_type.zigTypeTag(mod) != .Pointer) return false; - const info = child_type.ptrInfo(mod); - switch (info.size) { - .Slice, .C => return false, - .Many, .One => return !info.@"allowzero", - } - }, - else => return false, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -2655,21 +2578,6 @@ pub const Type = struct { else => false, }; switch (ty.tag()) { - .optional => { - const child_ty = ty.castTag(.optional).?.data; - switch (child_ty.zigTypeTag(mod)) { - .Pointer => { - const info = child_ty.ptrInfo(mod); - switch (info.size) { - .C => return false, - .Slice, .Many, .One => return !info.@"allowzero", - } - }, - .ErrorSet => return true, - else => return false, - } - }, - .pointer => return ty.castTag(.pointer).?.data.size == .C, else => return false, @@ -2692,16 +2600,6 @@ pub const Type = struct { else => false, }; switch (ty.tag()) { - .optional => { - const child_ty = ty.castTag(.optional).?.data; - if (child_ty.zigTypeTag(mod) != .Pointer) return false; - const info = child_ty.ptrInfo(mod); - switch (info.size) { - .Slice, .C => return false, - .Many, .One => return !info.@"allowzero", - } - }, - .pointer => return ty.castTag(.pointer).?.data.size == .C, else => return false, @@ -2747,7 +2645,6 @@ pub const Type = struct { return child_ty; } }, - .optional => ty.castTag(.optional).?.data.childType(mod), else => unreachable, }, @@ -2784,13 +2681,10 @@ pub const Type = struct { } /// Asserts that the type is an optional. - /// Resulting `Type` will have inner memory referencing `buf`. /// Note that for C pointers this returns the type unmodified. pub fn optionalChild(ty: Type, mod: *const Module) Type { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .optional => ty.castTag(.optional).?.data, - .pointer, // here we assume it is a C pointer => return ty, @@ -3305,15 +3199,6 @@ pub const Type = struct { .pointer, => return null, - .optional => { - const child_ty = ty.optionalChild(mod); - if (child_ty.isNoReturn()) { - return Value.null; - } else { - return null; - } - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -3524,10 +3409,6 @@ pub const Type = struct { } }, - .optional => { - return ty.optionalChild(mod).comptimeOnly(mod); - }, - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -4216,7 +4097,6 @@ pub const Type = struct { // After this, the tag requires a payload. 
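        // (With this commit, `optional` drops out of the payload-tag list below;
        // optional types are represented solely by InternPool `opt_type` keys.)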
pointer, - optional, error_union, error_set, error_set_single, @@ -4233,8 +4113,6 @@ pub const Type = struct { .inferred_alloc_mut, => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), - .optional => Payload.ElemType, - .error_set => Payload.ErrorSet, .error_set_inferred => Payload.ErrorSetInferred, .error_set_merged => Payload.ErrorSetMerged, @@ -4326,11 +4204,6 @@ pub const Type = struct { data: u64, }; - pub const ElemType = struct { - base: Payload, - data: Type, - }; - pub const Bits = struct { base: Payload, data: u16, @@ -4570,11 +4443,11 @@ pub const Type = struct { } pub fn optional(arena: Allocator, child_type: Type, mod: *Module) Allocator.Error!Type { - if (child_type.ip_index != .none) { - return mod.optionalType(child_type.ip_index); - } else { - return Type.Tag.optional.create(arena, child_type); - } + // TODO: update callsites of this function to directly call + // mod.optionalType and then delete this function. + _ = arena; + + return mod.optionalType(child_type.ip_index); } pub fn errorUnion( From 7bf91fc79ac9e4eae575baf3a2ca9549bc3bf6c2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 18 May 2023 22:02:55 -0700 Subject: [PATCH 071/205] compiler: eliminate legacy Type.Tag.pointer Now pointer types are stored only in InternPool. --- src/InternPool.zig | 55 ++-- src/Module.zig | 36 +-- src/Sema.zig | 119 ++++---- src/arch/aarch64/CodeGen.zig | 3 +- src/arch/arm/CodeGen.zig | 3 +- src/arch/sparc64/CodeGen.zig | 3 +- src/arch/x86_64/CodeGen.zig | 21 +- src/codegen.zig | 9 +- src/codegen/c.zig | 56 +--- src/codegen/c/type.zig | 3 +- src/codegen/llvm.zig | 119 +++----- src/codegen/spirv.zig | 6 +- src/link/Dwarf.zig | 3 +- src/type.zig | 520 ++++++++--------------------------- src/value.zig | 12 +- 15 files changed, 295 insertions(+), 673 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index bf48aeda8454..81035bffc552 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -186,17 +186,11 @@ pub const Key = union(enum) { pub const PtrType = struct { elem_type: Index, sentinel: Index = .none, - /// If zero use pointee_type.abiAlignment() - /// When creating pointer types, if alignment is equal to pointee type - /// abi alignment, this value should be set to 0 instead. - /// - /// Please don't change this to u32 or u29. If you want to save bits, - /// migrate the rest of the codebase to use the `Alignment` type rather - /// than using byte units. The LLVM backend can only handle `c_uint` - /// byte units; we can emit a semantic analysis error if alignment that - /// overflows that amount is attempted to be used, but it shouldn't - /// affect the other backends. - alignment: u64 = 0, + /// `none` indicates the ABI alignment of the pointee_type. In this + /// case, this field *must* be set to `none`, otherwise the + /// `InternPool` equality and hashing functions will return incorrect + /// results. + alignment: Alignment = .none, /// If this is non-zero it means the pointer points to a sub-byte /// range of data, which is backed by a "host integer" with this /// number of bytes. @@ -378,15 +372,11 @@ pub const Key = union(enum) { /// Tells whether a parameter is noalias. See `paramIsNoalias` helper /// method for accessing this. noalias_bits: u32, - /// If zero use default target function code alignment. - /// - /// Please don't change this to u32 or u29. If you want to save bits, - /// migrate the rest of the codebase to use the `Alignment` type rather - /// than using byte units. 
The LLVM backend can only handle `c_uint` - /// byte units; we can emit a semantic analysis error if alignment that - /// overflows that amount is attempted to be used, but it shouldn't - /// affect the other backends. - alignment: u64, + /// `none` indicates the function has the default alignment for + /// function code on the target. In this case, this field *must* be set + /// to `none`, otherwise the `InternPool` equality and hashing + /// functions will return incorrect results. + alignment: Alignment, cc: std.builtin.CallingConvention, is_var_args: bool, is_generic: bool, @@ -1500,6 +1490,13 @@ pub const Alignment = enum(u6) { none = std.math.maxInt(u6), _, + pub fn toByteUnitsOptional(a: Alignment) ?u64 { + return switch (a) { + .none => null, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + pub fn toByteUnits(a: Alignment, default: u64) u64 { return switch (a) { .none => default, @@ -1509,8 +1506,14 @@ pub const Alignment = enum(u6) { pub fn fromByteUnits(n: u64) Alignment { if (n == 0) return .none; + assert(std.math.isPowerOfTwo(n)); return @intToEnum(Alignment, @ctz(n)); } + + pub fn fromNonzeroByteUnits(n: u64) Alignment { + assert(n != 0); + return fromByteUnits(n); + } }; /// Used for non-sentineled arrays that have length fitting in u32, as well as @@ -1773,7 +1776,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { return .{ .ptr_type = .{ .elem_type = ptr_info.child, .sentinel = ptr_info.sentinel, - .alignment = ptr_info.flags.alignment.toByteUnits(0), + .alignment = ptr_info.flags.alignment, .size = ptr_info.flags.size, .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, @@ -2013,7 +2016,7 @@ fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { .return_type = type_function.data.return_type, .comptime_bits = type_function.data.comptime_bits, .noalias_bits = type_function.data.noalias_bits, - .alignment = type_function.data.flags.alignment.toByteUnits(0), + .alignment = type_function.data.flags.alignment, .cc = type_function.data.flags.cc, .is_var_args = type_function.data.flags.is_var_args, .is_generic = type_function.data.flags.is_generic, @@ -2100,16 +2103,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } + const is_allowzero = ptr_type.is_allowzero or ptr_type.size == .C; + ip.items.appendAssumeCapacity(.{ .tag = .type_pointer, .data = try ip.addExtra(gpa, Pointer{ .child = ptr_type.elem_type, .sentinel = ptr_type.sentinel, .flags = .{ - .alignment = Alignment.fromByteUnits(ptr_type.alignment), + .alignment = ptr_type.alignment, .is_const = ptr_type.is_const, .is_volatile = ptr_type.is_volatile, - .is_allowzero = ptr_type.is_allowzero, + .is_allowzero = is_allowzero, .size = ptr_type.size, .address_space = ptr_type.address_space, .vector_index = ptr_type.vector_index, @@ -2316,7 +2321,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .comptime_bits = func_type.comptime_bits, .noalias_bits = func_type.noalias_bits, .flags = .{ - .alignment = Alignment.fromByteUnits(func_type.alignment), + .alignment = func_type.alignment, .cc = func_type.cc, .is_var_args = func_type.is_var_args, .is_generic = func_type.is_generic, diff --git a/src/Module.zig b/src/Module.zig index 0a063a8ddc9d..5cd0d237b4d8 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6532,8 +6532,7 @@ pub fn populateTestFunctions( try mod.ensureDeclAnalyzed(decl_index); } const decl = mod.declPtr(decl_index); - var buf: Type.SlicePtrFieldTypeBuffer = 
undefined; - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf, mod).childType(mod); + const tmp_test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions @@ -6843,28 +6842,31 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type } pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - if (child_type.ip_index == .none) { - // TODO remove this after all types can be represented via the InternPool - return Type.Tag.pointer.create(mod.tmp_hack_arena.allocator(), .{ - .pointee_type = child_type, - .@"addrspace" = .generic, - }); - } return ptrType(mod, .{ .elem_type = child_type.ip_index }); } pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - if (child_type.ip_index == .none) { - // TODO remove this after all types can be represented via the InternPool - return Type.Tag.pointer.create(mod.tmp_hack_arena.allocator(), .{ - .pointee_type = child_type, - .mutable = false, - .@"addrspace" = .generic, - }); - } return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } +pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { + const info = ptr_ty.ptrInfoIp(mod.intern_pool); + return mod.ptrType(.{ + .elem_type = new_child.toIntern(), + + .sentinel = info.sentinel, + .alignment = info.alignment, + .host_size = info.host_size, + .bit_offset = info.bit_offset, + .vector_index = info.vector_index, + .size = info.size, + .is_const = info.is_const, + .is_volatile = info.is_volatile, + .is_allowzero = info.is_allowzero, + .address_space = info.address_space, + }); +} + pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type { return (try intern(mod, .{ .func_type = info })).toType(); } diff --git a/src/Sema.zig b/src/Sema.zig index 8492fd441f08..74efe9d141fa 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -9163,7 +9163,7 @@ fn funcCommon( .return_type = return_type.toIntern(), .cc = cc_resolved, .cc_is_generic = cc == null, - .alignment = alignment orelse 0, + .alignment = if (alignment) |a| InternPool.Alignment.fromByteUnits(a) else .none, .align_is_generic = alignment == null, .section_is_generic = section == .generic, .addrspace_is_generic = address_space == null, @@ -17740,10 +17740,10 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air extra_i += 1; const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src); const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known"); - break :blk val; - } else null; + break :blk val.toIntern(); + } else .none; - const abi_align: u32 = if (inst_data.flags.has_align) blk: { + const abi_align: InternPool.Alignment = if (inst_data.flags.has_align) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src); @@ -17752,13 +17752,13 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air // which case we can make this 0 without resolving it. 
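            // ("make this 0" above now means `break :blk .none`: in the Alignment
            // enum, "use the pointee's ABI alignment" is spelled `.none`, and an
            // explicit power-of-two byte alignment n maps as
            //   Alignment.fromByteUnits(n) == @intToEnum(Alignment, @ctz(n))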
if (val.castTag(.lazy_align)) |payload| { if (payload.data.eql(elem_ty, sema.mod)) { - break :blk 0; + break :blk .none; } } const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); - break :blk abi_align; - } else 0; + break :blk InternPool.Alignment.fromByteUnits(abi_align); + } else .none; const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); @@ -17789,7 +17789,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } const fn_align = mod.typeToFunc(elem_ty).?.alignment; - if (inst_data.flags.has_align and abi_align != 0 and fn_align != 0 and + if (inst_data.flags.has_align and abi_align != .none and fn_align != .none and abi_align != fn_align) { return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{}); @@ -17815,16 +17815,16 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } } - const ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = elem_ty, + const ty = try mod.ptrType(.{ + .elem_type = elem_ty.toIntern(), .sentinel = sentinel, - .@"align" = abi_align, - .@"addrspace" = address_space, + .alignment = abi_align, + .address_space = address_space, .bit_offset = bit_offset, .host_size = host_size, - .mutable = inst_data.flags.is_mutable, - .@"allowzero" = inst_data.flags.is_allowzero, - .@"volatile" = inst_data.flags.is_volatile, + .is_const = !inst_data.flags.is_mutable, + .is_allowzero = inst_data.flags.is_allowzero, + .is_volatile = inst_data.flags.is_volatile, .size = inst_data.size, }); return sema.addType(ty); @@ -18905,10 +18905,13 @@ fn zirReify( if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?); + + const abi_align = InternPool.Alignment.fromByteUnits( + (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?, + ); const unresolved_elem_ty = child_val.toType(); - const elem_ty = if (abi_align == 0) + const elem_ty = if (abi_align == .none) unresolved_elem_ty else t: { const elem_ty = try sema.resolveTypeFields(unresolved_elem_ty); @@ -18918,18 +18921,21 @@ fn zirReify( const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); - var actual_sentinel: ?Value = null; - if (!sentinel_val.isNull(mod)) { - if (ptr_size == .One or ptr_size == .C) { - return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); + const actual_sentinel: InternPool.Index = s: { + if (!sentinel_val.isNull(mod)) { + if (ptr_size == .One or ptr_size == .C) { + return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); + } + const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data; + const ptr_ty = try Type.ptr(sema.arena, mod, .{ + .@"addrspace" = .generic, + .pointee_type = try elem_ty.copy(sema.arena), + }); + const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; + break :s sent_val.toIntern(); } - const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data; - const ptr_ty = try Type.ptr(sema.arena, mod, .{ - .@"addrspace" = .generic, - .pointee_type = try elem_ty.copy(sema.arena), - }); - actual_sentinel = (try 
sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; - } + break :s .none; + }; if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "pointer to noreturn not allowed", .{}); @@ -18938,7 +18944,7 @@ fn zirReify( return sema.fail(block, src, "function pointers must be single pointers", .{}); } const fn_align = mod.typeToFunc(elem_ty).?.alignment; - if (abi_align != 0 and fn_align != 0 and + if (abi_align != .none and fn_align != .none and abi_align != fn_align) { return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{}); @@ -18964,14 +18970,14 @@ fn zirReify( } } - const ty = try Type.ptr(sema.arena, mod, .{ + const ty = try mod.ptrType(.{ .size = ptr_size, - .mutable = !is_const_val.toBool(mod), - .@"volatile" = is_volatile_val.toBool(mod), - .@"align" = abi_align, - .@"addrspace" = mod.toEnum(std.builtin.AddressSpace, address_space_val), - .pointee_type = try elem_ty.copy(sema.arena), - .@"allowzero" = is_allowzero_val.toBool(mod), + .is_const = is_const_val.toBool(mod), + .is_volatile = is_volatile_val.toBool(mod), + .alignment = abi_align, + .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val), + .elem_type = elem_ty.toIntern(), + .is_allowzero = is_allowzero_val.toBool(mod), .sentinel = actual_sentinel, }); return sema.addType(ty); @@ -19470,9 +19476,9 @@ fn zirReify( } const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod)); if (alignment == target_util.defaultFunctionAlignment(target)) { - break :alignment 0; + break :alignment .none; } else { - break :alignment alignment; + break :alignment InternPool.Alignment.fromByteUnits(alignment); } }; const return_type = return_type_val.optionalValue(mod) orelse @@ -24291,8 +24297,7 @@ fn fieldPtr( const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty; if (mem.eql(u8, field_name, "ptr")) { - const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const slice_ptr_ty = inner_ty.slicePtrFieldType(buf, mod); + const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = slice_ptr_ty, @@ -27914,7 +27919,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), + parent.ty.slicePtrFieldType(mod), &val_ptr.castTag(.slice).?.data.ptr, ptr_elem_ty, parent.decl_ref_mut, @@ -27981,7 +27986,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), + parent.ty.slicePtrFieldType(mod), &val_ptr.castTag(.slice).?.data.ptr, ptr_elem_ty, parent.decl_ref_mut, @@ -28363,7 +28368,7 @@ fn beginComptimePtrLoad( const slice_val = tv.val.castTag(.slice).?.data; deref.pointee = switch (field_index) { Value.Payload.Slice.ptr_index => TypedValue{ - .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), + .ty = field_ptr.container_ty.slicePtrFieldType(mod), .val = slice_val.ptr, }, Value.Payload.Slice.len_index => TypedValue{ @@ -29454,8 +29459,7 @@ fn analyzeSlicePtr( slice_ty: Type, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const result_ty = slice_ty.slicePtrFieldType(buf, mod); + const result_ty = slice_ty.slicePtrFieldType(mod); if (try sema.resolveMaybeUndefVal(slice)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_ty); return sema.addConstant(result_ty, val.slicePtr()); @@ 
-31611,15 +31615,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - .pointer => { - const child_ty = ty.childType(mod); - if (child_ty.zigTypeTag(mod) == .Fn) { - return mod.typeToFunc(child_ty).?.is_generic; - } else { - return sema.resolveTypeRequiresComptime(child_ty); - } - }, - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -33048,7 +33043,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set_merged, .error_union, .error_set_inferred, - .pointer, => return null, .inferred_alloc_const => unreachable, @@ -33604,12 +33598,6 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { }; switch (ty.tag()) { - .pointer => switch (ty.ptrSize(mod)) { - .Slice => return null, - .C => return ty.optionalChild(mod), - else => return ty, - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, @@ -33638,15 +33626,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - .pointer => { - const child_ty = ty.childType(mod); - if (child_ty.zigTypeTag(mod) == .Fn) { - return mod.typeToFunc(child_ty).?.is_generic; - } else { - return sema.typeRequiresComptime(child_ty); - } - }, - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index dea5b6312974..8b84189e1803 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3434,8 +3434,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&buf, mod); + const ptr_ty = slice_ty.slicePtrFieldType(mod); const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index e84c4de981c9..a6a715c75d08 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2432,8 +2432,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&buf, mod); + const ptr_ty = slice_ty.slicePtrFieldType(mod); const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 9d58dd9f29b3..072d3ed098fe 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -2462,8 +2462,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = 
slice_ty.slicePtrFieldType(mod); const index_lock: ?RegisterLock = if (index_mcv == .register) self.register_manager.lockRegAssumeUnused(index_mcv.register) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 149f872c9aee..e83644269f68 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4052,8 +4052,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); const index_ty = self.typeOf(rhs); const index_mcv = try self.resolveInst(rhs); @@ -4082,8 +4081,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs); const dst_mcv = try self.allocRegOrMem(inst, false); try self.load(dst_mcv, slice_ptr_field_type, elem_ptr); @@ -4281,11 +4279,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { break :blk MCValue{ .register = reg }; } else ptr; - var ptr_tag_pl: Type.Payload.Pointer = .{ - .data = ptr_union_ty.ptrInfo(mod), - }; - ptr_tag_pl.data.pointee_type = tag_ty; - const ptr_tag_ty = Type.initPayload(&ptr_tag_pl.base); + const ptr_tag_ty = try mod.adjustPtrTypeChild(ptr_union_ty, tag_ty); try self.store(ptr_tag_ty, adjusted_ptr, tag); return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -8671,9 +8665,8 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const pl_ty = opt_ty.optionalChild(mod); - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf, mod) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -8763,9 +8756,8 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const opt_ty = ptr_ty.childType(mod); const pl_ty = opt_ty.optionalChild(mod); - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf, mod) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -10803,8 +10795,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // here to elide it. 
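        // (slicePtrFieldType now derives the slice's pointer type straight from
        // the InternPool, so the old Type.SlicePtrFieldTypeBuffer stack scratch
        // buffer disappears from all of these call sites.)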
switch (dst_ptr_ty.ptrSize(mod)) { .Slice => { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf, mod); + const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(mod); // TODO: this only handles slices stored in the stack const ptr = dst_ptr; diff --git a/src/codegen.zig b/src/codegen.zig index 9eb294feacd2..8e145a3b32c6 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -347,8 +347,7 @@ pub fn generateSymbol( const slice = typed_value.val.castTag(.slice).?.data; // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = slice_ptr_field_type, .val = slice.ptr, @@ -850,10 +849,9 @@ fn lowerParentPtr( reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { .Pointer => offset: { assert(field_ptr.container_ty.isSlice(mod)); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(&buf, mod).abiSize(mod), + 1 => field_ptr.container_ty.slicePtrFieldType(mod).abiSize(mod), else => unreachable, }; }, @@ -952,8 +950,7 @@ fn lowerDeclRef( const mod = bin_file.options.module.?; if (typed_value.ty.isSlice(mod)) { // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = slice_ptr_field_type, .val = typed_value.val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 601382c1fdf4..c2a108d68ed0 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -566,8 +566,7 @@ pub const DeclGen = struct { try writer.writeAll("){ .ptr = "); } - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), val.slicePtr(), .Initializer); + try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(), .Initializer); const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); @@ -631,11 +630,7 @@ pub const DeclGen = struct { // Ensure complete type definition is visible before accessing fields. 
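                // `mod.adjustPtrTypeChild(ptr_ty, T)` below re-interns the pointer
                // type with its child swapped for `T`, carrying over sentinel,
                // alignment, address space, and the other pointer attributes; it
                // replaces the old in-place Type.Payload.Pointer mutation.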
_ = try dg.typeToIndex(field_ptr.container_ty, .complete); - var container_ptr_pl: Type.Payload.Pointer = .{ - .data = ptr_ty.ptrInfo(mod), - }; - container_ptr_pl.data.pointee_type = field_ptr.container_ty; - const container_ptr_ty = Type.initPayload(&container_ptr_pl.base); + const container_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, field_ptr.container_ty); switch (fieldLocation( field_ptr.container_ty, @@ -661,11 +656,7 @@ pub const DeclGen = struct { try dg.writeCValue(writer, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl: Type.Payload.Pointer = .{ - .data = ptr_ty.ptrInfo(mod), - }; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8); const byte_offset_val = try mod.intValue(Type.usize, byte_offset); @@ -788,8 +779,7 @@ pub const DeclGen = struct { } try writer.writeAll("{("); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); } else { @@ -1068,10 +1058,9 @@ pub const DeclGen = struct { } const slice = val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), slice.ptr, initializer_type); + try dg.renderValue(writer, ty.slicePtrFieldType(mod), slice.ptr, initializer_type); try writer.writeAll(", "); try dg.renderValue(writer, Type.usize, slice.len, initializer_type); try writer.writeByte('}'); @@ -1536,8 +1525,8 @@ pub const DeclGen = struct { switch (kind) { .forward => {}, - .complete => if (fn_info.alignment > 0) - try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .complete => if (fn_info.alignment.toByteUnitsOptional()) |a| + try w.print(" zig_align_fn({})", .{a}), else => unreachable, } @@ -1561,8 +1550,8 @@ pub const DeclGen = struct { ); switch (kind) { - .forward => if (fn_info.alignment > 0) - try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .forward => if (fn_info.alignment.toByteUnitsOptional()) |a| + try w.print(" zig_align_fn({})", .{a}), .complete => {}, else => unreachable, } @@ -4062,8 +4051,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.typeOfIndex(inst); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = inst_ty.slicePtrFieldType(&buf, mod); + const ptr_ty = inst_ty.slicePtrFieldType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -5047,7 +5035,6 @@ fn airIsNull( const operand_ty = f.typeOf(un_op); const optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = optional_ty.optionalChild(mod); - var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) TypedValue{ .ty = Type.bool, .val = Value.true } @@ -5058,7 +5045,7 @@ fn airIsNull( TypedValue{ .ty = payload_ty, .val = try mod.intValue(payload_ty, 0) } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); - const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf, mod); + const slice_ptr_ty = payload_ty.slicePtrFieldType(mod); break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; } else rhs: { try writer.writeAll(".is_null"); @@ -5278,11 
+5265,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { - var u8_ptr_pl: Type.Payload.Pointer = .{ - .data = field_ptr_ty.ptrInfo(mod), - }; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5295,11 +5278,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); }, .byte_offset => |byte_offset| { - var u8_ptr_pl: Type.Payload.Pointer = .{ - .data = field_ptr_ty.ptrInfo(mod), - }; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); const byte_offset_val = try mod.intValue(Type.usize, byte_offset); @@ -5347,11 +5326,7 @@ fn fieldPtr( try f.writeCValueDerefMember(writer, container_ptr_val, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl: Type.Payload.Pointer = .{ - .data = field_ptr_ty.ptrInfo(mod), - }; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); const byte_offset_val = try mod.intValue(Type.usize, byte_offset); @@ -5794,8 +5769,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { // Unfortunately, C does not support any equivalent to // &(*(void *)p)[0], although LLVM does via GetElementPtr if (operand == .undef) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf, mod) }, .Initializer); + try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(mod) }, .Initializer); } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index a2af395a984b..70426972af6a 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1431,8 +1431,7 @@ pub const CType = extern union { .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), .payload => unreachable, }) |fwd_idx| { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { self.storage = .{ .anon = undefined }; self.storage.anon.fields[0] = .{ diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 476f73cbe432..0c503edee404 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1591,40 +1591,30 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. 
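            // ptrInfoIp returns the raw InternPool.Key.PtrType, so the field names
            // shift accordingly: pointee_type -> elem_type, mutable -> !is_const,
            // @"addrspace" -> address_space, and sentinel becomes an
            // InternPool.Index that uses `.none` instead of a nullable Value.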
- const ptr_info = ty.ptrInfo(mod); + const ptr_info = ty.ptrInfoIp(mod.intern_pool); - if (ptr_info.sentinel != null or - ptr_info.@"addrspace" != .generic or + if (ptr_info.sentinel != .none or + ptr_info.address_space != .generic or ptr_info.bit_offset != 0 or ptr_info.host_size != 0 or ptr_info.vector_index != .none or - ptr_info.@"allowzero" or - !ptr_info.mutable or - ptr_info.@"volatile" or + ptr_info.is_allowzero or + ptr_info.is_const or + ptr_info.is_volatile or ptr_info.size == .Many or ptr_info.size == .C or - !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) + !ptr_info.elem_type.toType().hasRuntimeBitsIgnoreComptime(mod)) { - var payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = ptr_info.pointee_type, - .sentinel = null, - .@"align" = ptr_info.@"align", - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = switch (ptr_info.size) { - .Many, .C, .One => .One, - .Slice => .Slice, - }, + const bland_ptr_ty = try mod.ptrType(.{ + .elem_type = if (!ptr_info.elem_type.toType().hasRuntimeBitsIgnoreComptime(mod)) + .anyopaque_type + else + ptr_info.elem_type, + .alignment = ptr_info.alignment, + .size = switch (ptr_info.size) { + .Many, .C, .One => .One, + .Slice => .Slice, }, - }; - if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) { - payload.data.pointee_type = Type.anyopaque; - } - const bland_ptr_ty = Type.initPayload(&payload.base); + }); const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module }); @@ -1632,8 +1622,7 @@ pub const Object = struct { } if (ty.isSlice(mod)) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); const len_ty = Type.usize; const name = try ty.nameAlloc(gpa, o.module); @@ -1711,7 +1700,7 @@ pub const Object = struct { return full_di_ty; } - const elem_di_ty = try o.lowerDebugType(ptr_info.pointee_type, .fwd); + const elem_di_ty = try o.lowerDebugType(ptr_info.elem_type.toType(), .fwd); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const ptr_di_ty = dib.createPointerType( @@ -2625,8 +2614,8 @@ pub const DeclGen = struct { }, } - if (fn_info.alignment != 0) { - llvm_fn.setAlignment(@intCast(c_uint, fn_info.alignment)); + if (fn_info.alignment.toByteUnitsOptional()) |a| { + llvm_fn.setAlignment(@intCast(c_uint, a)); } // Function attributes that are independent of analysis results of the function body. 
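        // (Above, toByteUnitsOptional maps `.none` to null, so functions with the
        // target's default code alignment skip the explicit setAlignment call.)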
@@ -2819,8 +2808,7 @@ pub const DeclGen = struct { .Bool => return dg.context.intType(1), .Pointer => { if (t.isSlice(mod)) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_type = t.slicePtrFieldType(&buf, mod); + const ptr_type = t.slicePtrFieldType(mod); const fields: [2]*llvm.Type = .{ try dg.lowerType(ptr_type), @@ -3176,11 +3164,10 @@ pub const DeclGen = struct { }, .slice => { const param_ty = fn_info.param_types[it.zig_index - 1].toType(); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod) + param_ty.optionalChild(mod).slicePtrFieldType(mod) else - param_ty.slicePtrFieldType(&buf, mod); + param_ty.slicePtrFieldType(mod); const ptr_llvm_ty = try dg.lowerType(ptr_ty); const len_llvm_ty = try dg.lowerType(Type.usize); @@ -3368,10 +3355,9 @@ pub const DeclGen = struct { }, .slice => { const slice = tv.val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; const fields: [2]*llvm.Value = .{ try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(&buf, mod), + .ty = tv.ty.slicePtrFieldType(mod), .val = slice.ptr, }), try dg.lowerValue(.{ @@ -4171,8 +4157,7 @@ pub const DeclGen = struct { ) Error!*llvm.Value { const mod = self.module; if (tv.ty.isSlice(mod)) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = tv.ty.slicePtrFieldType(&buf, mod); + const ptr_ty = tv.ty.slicePtrFieldType(mod); const fields: [2]*llvm.Value = .{ try self.lowerValue(.{ .ty = ptr_ty, @@ -6043,17 +6028,14 @@ pub const FuncGen = struct { const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .elem_type = llvm_field.ty.ip_index, - .alignment = llvm_field.alignment, + .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; - const field_alignment = if (llvm_field.alignment != 0) - llvm_field.alignment - else - llvm_field.ty.abiAlignment(mod); - return self.loadByRef(field_ptr, field_ty, field_alignment, false); + assert(llvm_field.alignment != 0); + return self.loadByRef(field_ptr, field_ty, llvm_field.alignment, false); } else { return self.load(field_ptr, field_ptr_ty); } @@ -6151,7 +6133,7 @@ pub const FuncGen = struct { const fn_ty = try mod.funcType(.{ .param_types = &.{}, .return_type = .void_type, - .alignment = 0, + .alignment = .none, .noalias_bits = 0, .comptime_bits = 0, .cc = .Unspecified, @@ -6655,8 +6637,7 @@ pub const FuncGen = struct { operand; if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); - var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf, mod)); + const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(mod)); return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); @@ -6923,7 +6904,7 @@ pub const FuncGen = struct { const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .elem_type = llvm_field.ty.ip_index, - .alignment = llvm_field.alignment, + .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); return self.load(field_ptr, field_ptr_ty); } @@ -9319,14 +9300,12 @@ pub const 
FuncGen = struct { const llvm_i = llvmField(result_ty, i, mod).?.index; indices[1] = llvm_u32.constInt(llvm_i, .False); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); - var field_ptr_payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = self.typeOf(elem), - .@"align" = result_ty.structFieldAlign(i, mod), - .@"addrspace" = .generic, - }, - }; - const field_ptr_ty = Type.initPayload(&field_ptr_payload.base); + const field_ptr_ty = try mod.ptrType(.{ + .elem_type = self.typeOf(elem).toIntern(), + .alignment = InternPool.Alignment.fromNonzeroByteUnits( + result_ty.structFieldAlign(i, mod), + ), + }); try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic); } @@ -9350,13 +9329,9 @@ pub const FuncGen = struct { const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); const array_info = result_ty.arrayInfo(mod); - var elem_ptr_payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = array_info.elem_type, - .@"addrspace" = .generic, - }, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base); + const elem_ptr_ty = try mod.ptrType(.{ + .elem_type = array_info.elem_type.toIntern(), + }); for (elements, 0..) |elem, i| { const indices: [2]*llvm.Value = .{ @@ -9476,14 +9451,10 @@ pub const FuncGen = struct { // tag and the payload. const index_type = self.context.intType(32); - var field_ptr_payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = field.ty, - .@"align" = field_align, - .@"addrspace" = .generic, - }, - }; - const field_ptr_ty = Type.initPayload(&field_ptr_payload.base); + const field_ptr_ty = try mod.ptrType(.{ + .elem_type = field.ty.toIntern(), + .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align), + }); if (layout.tag_size == 0) { const indices: [3]*llvm.Value = .{ index_type.constNull(), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 777bb1cff99d..eada74e6d4db 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -669,8 +669,7 @@ pub const DeclGen = struct { .slice => { const slice = val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); try self.lower(ptr_ty, slice.ptr); try self.addInt(Type.usize, slice.len); @@ -2991,9 +2990,8 @@ pub const DeclGen = struct { if (optional_ty.optionalReprIsPayload(mod)) { // Pointer payload represents nullability: pointer or slice. 
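            // When the optional's representation is the payload itself, null-ness
            // of a slice payload is judged by its ptr field, hence slicePtrFieldType.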
- var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (payload_ty.isSlice(mod)) - payload_ty.slicePtrFieldType(&ptr_buf, mod) + payload_ty.slicePtrFieldType(mod) else payload_ty; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 92ea2a15dcfe..f4f19f30d024 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -277,8 +277,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - var buf = try arena.create(Type.SlicePtrFieldTypeBuffer); - const ptr_ty = ty.slicePtrFieldType(buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(6); diff --git a/src/type.zig b/src/type.zig index 735d532c4694..ebe3d52b05f9 100644 --- a/src/type.zig +++ b/src/type.zig @@ -42,7 +42,6 @@ pub const Type = struct { .error_set_merged, => return .ErrorSet, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, => return .Pointer, @@ -250,17 +249,9 @@ pub const Type = struct { return elem_ty; } + /// Asserts the type is a pointer. pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.mutable, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| !ptr_type.is_const, - else => unreachable, - }, - }; + return !mod.intern_pool.indexToKey(ty.ip_index).ptr_type.is_const; } pub const ArrayInfo = struct { @@ -277,24 +268,21 @@ pub const Type = struct { }; } - pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |p| Payload.Pointer.Data.fromKey(p), - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| Payload.Pointer.Data.fromKey(p), - else => unreachable, - }, + pub fn ptrInfoIp(ty: Type, ip: InternPool) InternPool.Key.PtrType { + return switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |p| p, + .opt_type => |child| switch (ip.indexToKey(child)) { + .ptr_type => |p| p, else => unreachable, }, + else => unreachable, }; } + pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { + return Payload.Pointer.Data.fromKey(ptrInfoIp(ty, mod.intern_pool)); + } + pub fn eql(a: Type, b: Type, mod: *Module) bool { if (a.ip_index != .none or b.ip_index != .none) { // The InternPool data structure hashes based on Key to make interned objects @@ -335,7 +323,6 @@ pub const Type = struct { return true; }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, => { @@ -434,7 +421,6 @@ pub const Type = struct { std.hash.autoHash(hasher, ies); }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, => { @@ -512,26 +498,6 @@ pub const Type = struct { .inferred_alloc_mut, => unreachable, - .pointer => { - const payload = self.castTag(.pointer).?.data; - const sent: ?Value = if (payload.sentinel) |some| - try some.copy(allocator) - else - null; - return Tag.pointer.create(allocator, .{ - .pointee_type = try payload.pointee_type.copy(allocator), - .sentinel = sent, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = 
payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = payload.size, - }); - }, .error_union => { const payload = self.castTag(.error_union).?.data; return Tag.error_union.create(allocator, .{ @@ -623,41 +589,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .pointer => { - const payload = ty.castTag(.pointer).?.data; - if (payload.sentinel) |some| switch (payload.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{some.fmtDebug()}), - .Slice => try writer.print("[:{}]", .{some.fmtDebug()}), - } else switch (payload.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (payload.@"align" != 0 or payload.host_size != 0 or payload.vector_index != .none) { - try writer.print("align({d}", .{payload.@"align"}); - - if (payload.bit_offset != 0 or payload.host_size != 0) { - try writer.print(":{d}:{d}", .{ payload.bit_offset, payload.host_size }); - } - if (payload.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (payload.vector_index != .none) { - try writer.print(":{d}", .{@enumToInt(payload.vector_index)}); - } - try writer.writeAll(") "); - } - if (payload.@"addrspace" != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(payload.@"addrspace")}); - } - if (!payload.mutable) try writer.writeAll("const "); - if (payload.@"volatile") try writer.writeAll("volatile "); - if (payload.@"allowzero" and payload.size != .C) try writer.writeAll("allowzero "); - - ty = payload.pointee_type; - continue; - }, .error_union => { const payload = ty.castTag(.error_union).?.data; try payload.error_set.dump("", .{}, writer); @@ -734,47 +665,6 @@ pub const Type = struct { try print(error_union.payload, writer, mod); }, - .pointer => { - const info = ty.ptrInfo(mod); - - if (info.sentinel) |s| switch (info.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), - .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), - } else switch (info.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { - if (info.@"align" != 0) { - try writer.print("align({d}", .{info.@"align"}); - } else { - const alignment = info.pointee_type.abiAlignment(mod); - try writer.print("align({d}", .{alignment}); - } - - if (info.bit_offset != 0 or info.host_size != 0) { - try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); - } - if (info.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (info.vector_index != .none) { - try writer.print(":{d}", .{@enumToInt(info.vector_index)}); - } - try writer.writeAll(") "); - } - if (info.@"addrspace" != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); - } - if (!info.mutable) try writer.writeAll("const "); - if (info.@"volatile") try writer.writeAll("volatile "); - if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); - - try print(info.pointee_type, writer, mod); - }, - .error_set => { const names = ty.castTag(.error_set).?.data.names.keys(); try writer.writeAll("error{"); @@ -951,8 +841,8 @@ pub const Type = struct { try writer.writeAll("..."); } try writer.writeAll(") "); 
- if (fn_info.alignment != 0) { - try writer.print("align({d}) ", .{fn_info.alignment}); + if (fn_info.alignment.toByteUnitsOptional()) |a| { + try writer.print("align({d}) ", .{a}); } if (fn_info.cc != .Unspecified) { try writer.writeAll("callconv(."); @@ -1032,20 +922,6 @@ pub const Type = struct { .error_set_merged, => return true, - // Pointers to zero-bit types still have a runtime address; however, pointers - // to comptime-only types do not, with the exception of function pointers. - .pointer => { - if (ignore_comptime_only) { - return true; - } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { - return !mod.typeToFunc(ty.childType(mod)).?.is_generic; - } else if (strat == .sema) { - return !(try strat.sema.typeRequiresComptime(ty)); - } else { - return !comptimeOnly(ty, mod); - } - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -1231,8 +1107,6 @@ pub const Type = struct { .empty_struct_type => false, .none => switch (ty.tag()) { - .pointer => true, - .error_set, .error_set_single, .error_set_inferred, @@ -1410,51 +1284,27 @@ pub const Type = struct { } pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => { - const ptr_info = ty.castTag(.pointer).?.data; - if (ptr_info.@"align" != 0) { - return ptr_info.@"align"; - } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } - }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| { - if (ptr_type.alignment != 0) { - return @intCast(u32, ptr_type.alignment); - } else if (opt_sema) |sema| { - const res = try ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } else { - return (ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } - }, - .opt_type => |child| return child.toType().ptrAlignmentAdvanced(mod, opt_sema), - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| { + if (ptr_type.alignment.toByteUnitsOptional()) |a| { + return @intCast(u32, a); + } else if (opt_sema) |sema| { + const res = try ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } else { + return (ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + } }, - } + .opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema), + else => unreachable, + }; } pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.@"addrspace", - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.address_space, - .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space, - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.address_space, + .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space, + else => unreachable, }; } @@ -1504,7 +1354,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, .none => switch 
(ty.tag()) { - .pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, // TODO revisit this when we have the concept of the error tag type .error_set_inferred, @@ -1541,10 +1390,11 @@ pub const Type = struct { .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), // represents machine code; not a pointer - .func_type => |func_type| { - const alignment = @intCast(u32, func_type.alignment); - if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; - return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; + .func_type => |func_type| return AbiAlignmentAdvanced{ + .scalar = if (func_type.alignment.toByteUnitsOptional()) |a| + @intCast(u32, a) + else + target_util.defaultFunctionAlignment(target), }, .simple_type => |t| switch (t) { @@ -1882,11 +1732,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - }, - // TODO revisit this when we have the concept of the error tag type .error_set_inferred, .error_set, @@ -2201,11 +2046,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth(), - }, - .error_set, .error_set_single, .error_set_inferred, @@ -2384,8 +2224,6 @@ pub const Type = struct { .inferred_alloc_mut, => true, - .pointer => ty.castTag(.pointer).?.data.size == .One, - else => false, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -2408,8 +2246,6 @@ pub const Type = struct { .inferred_alloc_mut, => .One, - .pointer => ty.castTag(.pointer).?.data.size, - else => null, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -2421,10 +2257,7 @@ pub const Type = struct { pub fn isSlice(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.size == .Slice, - else => false, - }, + .none => false, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.size == .Slice, else => false, @@ -2432,50 +2265,14 @@ pub const Type = struct { }; } - pub const SlicePtrFieldTypeBuffer = union { - pointer: Payload.Pointer, - }; - - pub fn slicePtrFieldType(ty: Type, buffer: *SlicePtrFieldTypeBuffer, mod: *const Module) Type { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => { - const payload = ty.castTag(.pointer).?.data; - assert(payload.size == .Slice); - - buffer.* = .{ - .pointer = .{ - .data = .{ - .pointee_type = payload.pointee_type, - .sentinel = payload.sentinel, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = .Many, - }, - }, - }; - return Type.initPayload(&buffer.pointer.base); - }, - - else => unreachable, - }, - else => return mod.intern_pool.slicePtrType(ty.ip_index).toType(), - } + pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { + return 
mod.intern_pool.slicePtrType(ty.ip_index).toType(); } pub fn isConstPtr(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => !ty.castTag(.pointer).?.data.mutable, - else => false, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .none => false, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.is_const, else => false, }, @@ -2488,10 +2285,7 @@ pub const Type = struct { pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.@"volatile", - else => false, - }, + .none => false, else => switch (ip.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.is_volatile, else => false, @@ -2501,12 +2295,10 @@ pub const Type = struct { pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.@"allowzero", - else => ty.zigTypeTag(mod) == .Optional, - }, + .none => false, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.is_allowzero, + .opt_type => true, else => false, }, }; @@ -2514,10 +2306,7 @@ pub const Type = struct { pub fn isCPtr(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.size == .C, - else => false, - }, + .none => false, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.size == .C, else => false, @@ -2526,16 +2315,9 @@ pub const Type = struct { } pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return false, - .One, .Many, .C => return true, - }, - - else => return false, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (ty.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => false, .One, .Many, .C => true, @@ -2549,7 +2331,7 @@ pub const Type = struct { }, else => false, }, - } + }; } /// For pointer-like optionals, returns true, otherwise returns the allowzero property @@ -2563,47 +2345,43 @@ pub const Type = struct { /// See also `isPtrLikeOptional`. pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { - .Pointer => { - const info = child.toType().ptrInfo(mod); - switch (info.size) { - .C => return false, - else => return !info.@"allowzero", - } + return switch (ty.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { + .Pointer => { + const info = child.toType().ptrInfo(mod); + return switch (info.size) { + .C => false, + else => !info.@"allowzero", + }; + }, + .ErrorSet => true, + else => false, }, - .ErrorSet => true, else => false, }, - else => false, }; - switch (ty.tag()) { - .pointer => return ty.castTag(.pointer).?.data.size == .C, - - else => return false, - } } /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. 
/// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.size == .C, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice, .C => false, - .Many, .One => !ptr_type.is_allowzero, + return switch (ty.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.size == .C, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice, .C => false, + .Many, .One => !ptr_type.is_allowzero, + }, + else => false, }, else => false, }, - else => false, }; - switch (ty.tag()) { - .pointer => return ty.castTag(.pointer).?.data.size == .C, - - else => return false, - } } /// For *[N]T, returns [N]T. @@ -2614,14 +2392,7 @@ pub const Type = struct { } pub fn childTypeIp(ty: Type, ip: InternPool) Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.pointee_type, - - else => unreachable, - }, - else => ip.childType(ty.ip_index).toType(), - }; + return ip.childType(ty.ip_index).toType(); } /// For *[N]T, returns T. @@ -2634,34 +2405,19 @@ pub const Type = struct { /// For []T, returns T. /// For anyframe->T, returns T. pub fn elemType2(ty: Type, mod: *const Module) Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => { - const info = ty.castTag(.pointer).?.data; - const child_ty = info.pointee_type; - if (info.size == .One) { - return child_ty.shallowElemType(mod); - } else { - return child_ty; - } - }, - - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .One => ptr_type.elem_type.toType().shallowElemType(mod), + .Many, .C, .Slice => ptr_type.elem_type.toType(), }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { - .One => ptr_type.elem_type.toType().shallowElemType(mod), - .Many, .C, .Slice => ptr_type.elem_type.toType(), - }, - .anyframe_type => |child| { - assert(child != .none); - return child.toType(); - }, - .vector_type => |vector_type| vector_type.child.toType(), - .array_type => |array_type| array_type.child.toType(), - .opt_type => |child| mod.intern_pool.childType(child).toType(), - else => unreachable, + .anyframe_type => |child| { + assert(child != .none); + return child.toType(); }, + .vector_type => |vector_type| vector_type.child.toType(), + .array_type => |array_type| array_type.child.toType(), + .opt_type => |child| mod.intern_pool.childType(child).toType(), + else => unreachable, }; } @@ -2683,21 +2439,13 @@ pub const Type = struct { /// Asserts that the type is an optional. /// Note that for C pointers this returns the type unmodified. 
pub fn optionalChild(ty: Type, mod: *const Module) Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer, // here we assume it is a C pointer - => return ty, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .opt_type => |child| child.toType(), - .ptr_type => |ptr_type| b: { - assert(ptr_type.size == .C); - break :b ty; - }, - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opt_type => |child| child.toType(), + .ptr_type => |ptr_type| b: { + assert(ptr_type.size == .C); + break :b ty; }, + else => unreachable, }; } @@ -2921,23 +2669,16 @@ pub const Type = struct { /// Asserts the type is an array, pointer or vector. pub fn sentinel(ty: Type, mod: *const Module) ?Value { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.sentinel, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .vector_type, - .struct_type, - .anon_struct_type, - => null, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type, + .struct_type, + .anon_struct_type, + => null, - .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, - .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, - else => unreachable, - }, + else => unreachable, }; } @@ -3196,7 +2937,6 @@ pub const Type = struct { .error_set, .error_set_merged, .error_set_inferred, - .pointer, => return null, .inferred_alloc_const => unreachable, @@ -3400,15 +3140,6 @@ pub const Type = struct { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - .pointer => { - const child_ty = ty.childType(mod); - if (child_ty.zigTypeTag(mod) == .Fn) { - return false; - } else { - return child_ty.comptimeOnly(mod); - } - }, - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -4096,7 +3827,6 @@ pub const Type = struct { inferred_alloc_const, // See last_no_payload_tag below. // After this, the tag requires a payload. - pointer, error_union, error_set, error_set_single, @@ -4117,7 +3847,6 @@ pub const Type = struct { .error_set_inferred => Payload.ErrorSetInferred, .error_set_merged => Payload.ErrorSetMerged, - .pointer => Payload.Pointer, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, }; @@ -4230,10 +3959,8 @@ pub const Type = struct { data: *Module.Fn.InferredErrorSet, }; + /// TODO: remove this data structure since we have `InternPool.Key.PtrType`. pub const Pointer = struct { - pub const base_tag = Tag.pointer; - - base: Payload = Payload{ .tag = base_tag }, data: Data, pub const Data = struct { @@ -4270,7 +3997,7 @@ pub const Type = struct { return .{ .pointee_type = p.elem_type.toType(), .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null, - .@"align" = @intCast(u32, p.alignment), + .@"align" = @intCast(u32, p.alignment.toByteUnits(0)), .@"addrspace" = p.address_space, .bit_offset = p.bit_offset, .host_size = p.host_size, @@ -4368,11 +4095,11 @@ pub const Type = struct { pub const err_int = Type.u16; pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { - var d = data; + // TODO: update callsites of this function to directly call mod.ptrType + // and then delete this function. 
+    _ = arena;
 
-    if (d.size == .C) {
-        d.@"allowzero" = true;
-    }
+    var d = data;
 
     // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
     // type, we change it to 0 here. If this causes an assertion trip because the
@@ -4396,32 +4123,19 @@ pub const Type = struct {
         }
     }
 
-    ip: {
-        if (d.pointee_type.ip_index == .none) break :ip;
-
-        if (d.sentinel) |s| {
-            switch (s.ip_index) {
-                .none, .null_value => break :ip,
-                else => {},
-            }
-        }
-
-        return mod.ptrType(.{
-            .elem_type = d.pointee_type.ip_index,
-            .sentinel = if (d.sentinel) |s| s.ip_index else .none,
-            .alignment = d.@"align",
-            .host_size = d.host_size,
-            .bit_offset = d.bit_offset,
-            .vector_index = d.vector_index,
-            .size = d.size,
-            .is_const = !d.mutable,
-            .is_volatile = d.@"volatile",
-            .is_allowzero = d.@"allowzero",
-            .address_space = d.@"addrspace",
-        });
-    }
-
-    return Type.Tag.pointer.create(arena, d);
+    return mod.ptrType(.{
+        .elem_type = d.pointee_type.ip_index,
+        .sentinel = if (d.sentinel) |s| s.ip_index else .none,
+        .alignment = InternPool.Alignment.fromByteUnits(d.@"align"),
+        .host_size = d.host_size,
+        .bit_offset = d.bit_offset,
+        .vector_index = d.vector_index,
+        .size = d.size,
+        .is_const = !d.mutable,
+        .is_volatile = d.@"volatile",
+        .is_allowzero = d.@"allowzero",
+        .address_space = d.@"addrspace",
+    });
 }
 
 pub fn array(
diff --git a/src/value.zig b/src/value.zig
index 35d144f91227..310049608599 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1844,8 +1844,7 @@ pub const Value = struct {
                 return false;
             }
 
-            var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-            const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod);
+            const ptr_ty = ty.slicePtrFieldType(mod);
 
             return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema);
         },
@@ -2001,8 +2000,7 @@ pub const Value = struct {
             return false;
         }
 
-        var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-        const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod);
+        const ptr_ty = ty.slicePtrFieldType(mod);
         const a_ptr = switch (a_ty.ptrSize(mod)) {
             .Slice => a.slicePtr(),
             .One => a,
@@ -2121,8 +2119,7 @@ pub const Value = struct {
         .Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) {
             .slice => {
                 const slice = val.castTag(.slice).?.data;
-                var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod);
+                const ptr_ty = ty.slicePtrFieldType(mod);
                 hash(slice.ptr, ptr_ty, hasher, mod);
                 hash(slice.len, Type.usize, hasher, mod);
             },
@@ -2253,8 +2250,7 @@ pub const Value = struct {
         .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
             .slice => {
                 const slice = val.castTag(.slice).?.data;
-                var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod);
+                const ptr_ty = ty.slicePtrFieldType(mod);
                 slice.ptr.hashUncoerced(ptr_ty, hasher, mod);
             },
             else => val.hashPtr(hasher, mod),

From 9ff514b6a35b7201f45f8bff31c61b4f8cfa7a7a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 20 May 2023 12:09:07 -0700
Subject: [PATCH 072/205] compiler: move error union types and error set types
 to InternPool

One change worth noting in this commit is that `module.global_error_set`
is no longer kept strictly up-to-date. The previous code reserved integer
error values whenever it dealt with error set types, but semantic analysis
does not actually need those integers unless `@errorToInt` or `@intToError`
is used, so they can now be assigned lazily.
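To make the lazy assignment concrete, here is a minimal sketch, not the
compiler's actual implementation (`GlobalErrorSet` and `errorInt` are
hypothetical names; the real table hangs off `Module` and uses interned
strings): the integer for an error name is simply its insertion index in a
global map, assigned the first time a query asks for it.

    const std = @import("std");

    /// Hypothetical stand-in for `module.global_error_set`. Creating an
    /// error set type never touches this table; a name receives its
    /// integer only when an `@errorToInt`/`@intToError`-style query asks.
    const GlobalErrorSet = struct {
        map: std.StringArrayHashMapUnmanaged(void) = .{},

        /// Returns the integer for `name`, assigning the next free one
        /// lazily. Zero is reserved here to mean "no error".
        fn errorInt(ges: *GlobalErrorSet, gpa: std.mem.Allocator, name: []const u8) !u16 {
            const gop = try ges.map.getOrPut(gpa, name);
            return @intCast(u16, gop.index + 1);
        }
    };

    test "error integers are assigned lazily and remain stable" {
        const gpa = std.testing.allocator;
        var ges: GlobalErrorSet = .{};
        defer ges.map.deinit(gpa);
        try std.testing.expectEqual(@as(u16, 1), try ges.errorInt(gpa, "OutOfMemory"));
        try std.testing.expectEqual(@as(u16, 2), try ges.errorInt(gpa, "FileNotFound"));
        // Asking again returns the same value, so already-handed-out
        // integers stay valid no matter when other names are first used.
        try std.testing.expectEqual(@as(u16, 1), try ges.errorInt(gpa, "OutOfMemory"));
    }

The sketch uses u16, matching `Type.err_int = Type.u16` earlier in this
series.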
---
 src/Air.zig                  |    2 +-
 src/InternPool.zig           |  176 ++++-
 src/Liveness.zig             |    2 +-
 src/Liveness/Verify.zig      |    2 +-
 src/Module.zig               |  169 +++--
 src/Sema.zig                 |  821 ++++++++++-------
 src/TypedValue.zig           |    6 +-
 src/arch/aarch64/CodeGen.zig |   18 +-
 src/arch/arm/CodeGen.zig     |   18 +-
 src/arch/sparc64/CodeGen.zig |   20 +-
 src/arch/wasm/CodeGen.zig    |   41 +-
 src/arch/x86_64/CodeGen.zig  |   28 +-
 src/codegen.zig              |   12 +-
 src/codegen/c.zig            |   39 +-
 src/codegen/c/type.zig       |    4 +-
 src/codegen/llvm.zig         |   74 +-
 src/codegen/spirv.zig        |   13 +-
 src/link/Dwarf.zig           |   54 +-
 src/print_air.zig            |    1 -
 src/type.zig                 | 1259 +++++++++++-----------------
 src/value.zig                |   18 +-
 21 files changed, 1195 insertions(+), 1582 deletions(-)

diff --git a/src/Air.zig b/src/Air.zig
index 09f8d6c9e2e2..6673a37fb61e 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -1411,7 +1411,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
         .@"try" => {
             const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip);
-            return err_union_ty.errorUnionPayload();
+            return ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type.toType();
         },
 
         .work_item_id,
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 81035bffc552..79506c4404b9 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -34,6 +34,14 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{},
 /// When a Union object is freed from `allocated_unions`, it is pushed into this stack.
 unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{},
 
+/// InferredErrorSet objects are stored in this data structure because:
+/// * They contain pointers such as the errors map and the set of other inferred error sets.
+/// * They need to be mutated after creation.
+allocated_inferred_error_sets: std.SegmentedList(Module.Fn.InferredErrorSet, 0) = .{},
+/// When an InferredErrorSet object is freed from `allocated_inferred_error_sets`, it is
+/// pushed into this stack.
+inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.Fn.InferredErrorSet.Index) = .{},
+
 /// Some types such as enums, structs, and unions need to store mappings from field names
 /// to field index, or value to field index. In such cases, they will store the underlying
 /// field names and values directly, relying on one of these maps, stored separately,
@@ -113,6 +121,12 @@ pub const NullTerminatedString = enum(u32) {
             return std.hash.uint32(@enumToInt(a));
         }
     };
+
+    /// Compare based on integer value alone, ignoring the string contents.
+    pub fn indexLessThan(ctx: void, a: NullTerminatedString, b: NullTerminatedString) bool {
+        _ = ctx;
+        return @enumToInt(a) < @enumToInt(b);
+    }
 };
 
 /// An index into `string_bytes` which might be `none`.
@@ -135,10 +149,7 @@ pub const Key = union(enum) {
     /// `anyframe->T`. The payload is the child type, which may be `none` to indicate
     /// `anyframe`.
     anyframe_type: Index,
-    error_union_type: struct {
-        error_set_type: Index,
-        payload_type: Index,
-    },
+    error_union_type: ErrorUnionType,
     simple_type: SimpleType,
     /// This represents a struct that has been explicitly declared in source code,
     /// or was created with `@Type`. It is unique and based on a declaration.
@@ -152,6 +163,8 @@ pub const Key = union(enum) {
     opaque_type: OpaqueType,
     enum_type: EnumType,
     func_type: FuncType,
+    error_set_type: ErrorSetType,
+    inferred_error_set_type: Module.Fn.InferredErrorSet.Index,
 
     /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
     /// via `simple_value` and has a named `Index` tag for it.
@@ -183,6 +196,26 @@ pub const Key = union(enum) { pub const IntType = std.builtin.Type.Int; + pub const ErrorUnionType = struct { + error_set_type: Index, + payload_type: Index, + }; + + pub const ErrorSetType = struct { + /// Set of error names, sorted by null terminated string index. + names: []const NullTerminatedString, + /// This is ignored by `get` but will always be provided by `indexToKey`. + names_map: OptionalMapIndex = .none, + + /// Look up field index based on field name. + pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + const field_index = map.getIndexAdapted(name, adapter) orelse return null; + return @intCast(u32, field_index); + } + }; + pub const PtrType = struct { elem_type: Index, sentinel: Index = .none, @@ -507,6 +540,7 @@ pub const Key = union(enum) { .un, .undef, .enum_tag, + .inferred_error_set_type, => |info| std.hash.autoHash(hasher, info), .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), @@ -535,7 +569,7 @@ pub const Key = union(enum) { .ptr => |ptr| { std.hash.autoHash(hasher, ptr.ty); // Int-to-ptr pointers are hashed separately than decl-referencing pointers. - // This is sound due to pointer province rules. + // This is sound due to pointer provenance rules. switch (ptr.addr) { .int => |int| std.hash.autoHash(hasher, int), .decl => @panic("TODO"), @@ -547,6 +581,10 @@ pub const Key = union(enum) { for (aggregate.fields) |field| std.hash.autoHash(hasher, field); }, + .error_set_type => |error_set_type| { + for (error_set_type.names) |elem| std.hash.autoHash(hasher, elem); + }, + .anon_struct_type => |anon_struct_type| { for (anon_struct_type.types) |elem| std.hash.autoHash(hasher, elem); for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem); @@ -726,6 +764,14 @@ pub const Key = union(enum) { std.mem.eql(Index, a_info.values, b_info.values) and std.mem.eql(NullTerminatedString, a_info.names, b_info.names); }, + .error_set_type => |a_info| { + const b_info = b.error_set_type; + return std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + }, + .inferred_error_set_type => |a_info| { + const b_info = b.inferred_error_set_type; + return a_info == b_info; + }, .func_type => |a_info| { const b_info = b.func_type; @@ -752,6 +798,8 @@ pub const Key = union(enum) { .opt_type, .anyframe_type, .error_union_type, + .error_set_type, + .inferred_error_set_type, .simple_type, .struct_type, .union_type, @@ -1207,8 +1255,14 @@ pub const Tag = enum(u8) { /// If the child type is `none`, the type is `anyframe`. type_anyframe, /// An error union type. - /// data is payload to ErrorUnion. + /// data is payload to `Key.ErrorUnionType`. type_error_union, + /// An error set type. + /// data is payload to `ErrorSet`. + type_error_set, + /// The inferred error set type of a function. + /// data is `Module.Fn.InferredErrorSet.Index`. + type_inferred_error_set, /// An enum type with auto-numbered tag values. /// The enum is exhaustive. /// data is payload index to `EnumAuto`. @@ -1355,6 +1409,12 @@ pub const Tag = enum(u8) { aggregate, }; +/// Trailing: +/// 0. name: NullTerminatedString for each names_len +pub const ErrorSet = struct { + names_len: u32, +}; + /// Trailing: /// 0. 
param_type: Index for each params_len pub const TypeFunction = struct { @@ -1539,11 +1599,6 @@ pub const Array = struct { } }; -pub const ErrorUnion = struct { - error_set_type: Index, - payload_type: Index, -}; - /// Trailing: /// 0. field name: NullTerminatedString for each fields_len; declaration order /// 1. tag value: Index for each fields_len; declaration order @@ -1719,6 +1774,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); + ip.inferred_error_sets_free_list.deinit(gpa); + ip.allocated_inferred_error_sets.deinit(gpa); + for (ip.maps.items) |*map| map.deinit(gpa); ip.maps.deinit(gpa); @@ -1798,7 +1856,18 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_optional => .{ .opt_type = @intToEnum(Index, data) }, .type_anyframe => .{ .anyframe_type = @intToEnum(Index, data) }, - .type_error_union => @panic("TODO"), + .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, + .type_error_set => { + const error_set = ip.extraDataTrail(ErrorSet, data); + const names_len = error_set.data.names_len; + const names = ip.extra.items[error_set.end..][0..names_len]; + return .{ .error_set_type = .{ + .names = @ptrCast([]const NullTerminatedString, names), + } }; + }, + .type_inferred_error_set => .{ + .inferred_error_set_type = @intToEnum(Module.Fn.InferredErrorSet.Index, data), + }, .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, .type_struct => { @@ -2179,11 +2248,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .error_union_type => |error_union_type| { ip.items.appendAssumeCapacity(.{ .tag = .type_error_union, - .data = try ip.addExtra(gpa, ErrorUnion{ - .error_set_type = error_union_type.error_set_type, - .payload_type = error_union_type.payload_type, + .data = try ip.addExtra(gpa, error_union_type), + }); + }, + .error_set_type => |error_set_type| { + assert(error_set_type.names_map == .none); + assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan)); + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, error_set_type.names); + const names_len = @intCast(u32, error_set_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_set, + .data = ip.addExtraAssumeCapacity(ErrorSet{ + .names_len = names_len, }), }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, error_set_type.names)); + }, + .inferred_error_set_type => |ies_index| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_inferred_error_set, + .data = @enumToInt(ies_index), + }); }, .simple_type => |simple_type| { ip.items.appendAssumeCapacity(.{ @@ -3192,12 +3279,26 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { } } +pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .type_inferred_error_set) return .none; + const datas = ip.items.items(.data); + return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional(); +} + pub fn isOptionalType(ip: InternPool, ty: Index) bool { const tags = ip.items.items(.tag); if (ty == .none) return false; return tags[@enumToInt(ty)] == .type_optional; } +pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool { + const tags = 
ip.items.items(.tag); + assert(ty != .none); + return tags[@enumToInt(ty)] == .type_inferred_error_set; +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } @@ -3258,7 +3359,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_slice => 0, .type_optional => 0, .type_anyframe => 0, - .type_error_union => @sizeOf(ErrorUnion), + .type_error_union => @sizeOf(Key.ErrorUnionType), + .type_error_set => b: { + const info = ip.extraData(ErrorSet, data); + break :b @sizeOf(ErrorSet) + (@sizeOf(u32) * info.names_len); + }, + .type_inferred_error_set => @sizeOf(Module.Fn.InferredErrorSet), .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), @@ -3359,6 +3465,14 @@ pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } +pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet { + return ip.allocated_inferred_error_sets.at(@enumToInt(index)); +} + +pub fn inferredErrorSetPtrConst(ip: InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { + return ip.allocated_inferred_error_sets.at(@enumToInt(index)); +} + pub fn createStruct( ip: *InternPool, gpa: Allocator, @@ -3397,6 +3511,25 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) }; } +pub fn createInferredErrorSet( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Fn.InferredErrorSet, +) Allocator.Error!Module.Fn.InferredErrorSet.Index { + if (ip.inferred_error_sets_free_list.popOrNull()) |index| return index; + const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Fn.InferredErrorSet.Index, ip.allocated_inferred_error_sets.len - 1); +} + +pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void { + ip.inferredErrorSetPtr(index).* = undefined; + ip.inferred_error_sets_free_list.append(gpa, index) catch { + // In order to keep `destroyInferredErrorSet` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the InferredErrorSet until garbage collection. + }; +} + pub fn getOrPutString( ip: *InternPool, gpa: Allocator, @@ -3459,3 +3592,14 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { else => unreachable, }; } + +pub fn isNoReturn(ip: InternPool, ty: InternPool.Index) bool { + return switch (ty) { + .noreturn_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + .enum_type => |enum_type| enum_type.names.len == 0, + else => false, + }, + }; +} diff --git a/src/Liveness.zig b/src/Liveness.zig index da705cfab80f..856123fa9d27 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -1416,7 +1416,7 @@ fn analyzeInstBlock( // If the block is noreturn, block deaths not only aren't useful, they're impossible to // find: there could be more stuff alive after the block than before it! 
- if (!a.air.getRefType(ty_pl.ty).isNoReturn()) { + if (!a.intern_pool.isNoReturn(a.air.getRefType(ty_pl.ty).ip_index)) { // The block kills the difference in the live sets const block_scope = data.block_scopes.get(inst).?; const num_deaths = data.live_set.count() - block_scope.live_set.count(); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index dbdbf321740c..923e6f56589e 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -453,7 +453,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (block_liveness.deaths) |death| try self.verifyDeath(inst, death); - if (block_ty.isNoReturn()) { + if (ip.isNoReturn(block_ty.toIntern())) { assert(!self.blocks.contains(inst)); } else { var live = self.blocks.fetchRemove(inst).?.value; diff --git a/src/Module.zig b/src/Module.zig index 5cd0d237b4d8..70b08ea3a98f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -960,38 +960,6 @@ pub const EmitH = struct { fwd_decl: ArrayListUnmanaged(u8) = .{}, }; -/// Represents the data that an explicit error set syntax provides. -pub const ErrorSet = struct { - /// The Decl that corresponds to the error set itself. - owner_decl: Decl.Index, - /// The string bytes are stored in the owner Decl arena. - /// These must be in sorted order. See sortNames. - names: NameMap, - - pub const NameMap = std.StringArrayHashMapUnmanaged(void); - - pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - /// sort the NameMap. This should be called whenever the map is modified. - /// alloc should be the allocator used for the NameMap data. - pub fn sortNames(names: *NameMap) void { - const Context = struct { - keys: [][]const u8, - pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { - return std.mem.lessThan(u8, ctx.keys[a_index], ctx.keys[b_index]); - } - }; - names.sort(Context{ .keys = names.keys() }); - } -}; - pub const PropertyBoolean = enum { no, yes, unknown, wip }; /// Represents the data that a struct declaration provides. @@ -1530,13 +1498,6 @@ pub const Fn = struct { is_noinline: bool, calls_or_awaits_errorable_fn: bool = false, - /// Any inferred error sets that this function owns, both its own inferred error set and - /// inferred error sets of any inline/comptime functions called. Not to be confused - /// with inferred error sets of generic instantiations of this function, which are - /// *not* tracked here - they are tracked in the new `Fn` object created for the - /// instantiations. - inferred_error_sets: InferredErrorSetList = .{}, - pub const Analysis = enum { /// This function has not yet undergone analysis, because we have not /// seen a potential runtime call. It may be analyzed in future. @@ -1568,10 +1529,10 @@ pub const Fn = struct { /// direct additions via `return error.Foo;`, and possibly also errors that /// are returned from any dependent functions. When the inferred error set is /// fully resolved, this map contains all the errors that the function might return. - errors: ErrorSet.NameMap = .{}, + errors: NameMap = .{}, /// Other inferred error sets which this inferred error set should include. - inferred_error_sets: std.AutoArrayHashMapUnmanaged(*InferredErrorSet, void) = .{}, + inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{}, /// Whether the function returned anyerror. 
This is true if either of /// the dependent functions returns anyerror. @@ -1581,51 +1542,59 @@ pub const Fn = struct { /// can skip resolving any dependents of this inferred error set. is_resolved: bool = false, - pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void { + pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + + pub fn addErrorSet( + self: *InferredErrorSet, + err_set_ty: Type, + ip: *InternPool, + gpa: Allocator, + ) !void { switch (err_set_ty.ip_index) { .anyerror_type => { self.is_anyerror = true; }, - .none => switch (err_set_ty.tag()) { - .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { + else => switch (ip.indexToKey(err_set_ty.ip_index)) { + .error_set_type => |error_set_type| { + for (error_set_type.names) |name| { try self.errors.put(gpa, name, {}); } }, - .error_set_single => { - const name = err_set_ty.castTag(.error_set_single).?.data; - try self.errors.put(gpa, name, {}); - }, - .error_set_inferred => { - const ies = err_set_ty.castTag(.error_set_inferred).?.data; - try self.inferred_error_sets.put(gpa, ies, {}); - }, - .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - try self.errors.put(gpa, name, {}); - } + .inferred_error_set_type => |ies_index| { + try self.inferred_error_sets.put(gpa, ies_index, {}); }, else => unreachable, }, - else => @panic("TODO"), } } }; - pub const InferredErrorSetList = std.SinglyLinkedList(InferredErrorSet); - pub const InferredErrorSetListNode = InferredErrorSetList.Node; - + /// TODO: remove this function pub fn deinit(func: *Fn, gpa: Allocator) void { - var it = func.inferred_error_sets.first; - while (it) |node| { - const next = node.next; - node.data.errors.deinit(gpa); - node.data.inferred_error_sets.deinit(gpa); - gpa.destroy(node); - it = next; - } + _ = func; + _ = gpa; } pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { @@ -3508,6 +3477,10 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { return mod.intern_pool.structPtr(index); } +pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet { + return mod.intern_pool.inferredErrorSetPtr(index); +} + /// This one accepts an index from the InternPool and asserts that it is not /// the anonymous empty struct type. 
pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { @@ -4722,7 +4695,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl_tv.ty.fmt(mod), }); } - const ty = try decl_tv.val.toType().copy(decl_arena_allocator); + const ty = decl_tv.val.toType(); if (ty.getNamespace(mod) == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } @@ -4756,7 +4729,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { } decl.clearValues(mod); - decl.ty = try decl_tv.ty.copy(decl_arena_allocator); + decl.ty = decl_tv.ty; decl.val = try decl_tv.val.copy(decl_arena_allocator); // linksection, align, and addrspace were already set by Sema decl.has_tv = true; @@ -4823,7 +4796,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }, } - decl.ty = try decl_tv.ty.copy(decl_arena_allocator); + decl.ty = decl_tv.ty; decl.val = try decl_tv.val.copy(decl_arena_allocator); decl.@"align" = blk: { const align_ref = decl.zirAlignRef(mod); @@ -6599,7 +6572,7 @@ pub fn populateTestFunctions( // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. const new_ty = try Type.ptr(arena, mod, .{ .size = .Slice, - .pointee_type = try tmp_test_fn_ty.copy(arena), + .pointee_type = tmp_test_fn_ty, .mutable = false, .@"addrspace" = .generic, }); @@ -6877,6 +6850,42 @@ pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type { return (try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })).toType(); } +pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type { + return (try intern(mod, .{ .error_union_type = .{ + .error_set_type = error_set_ty.toIntern(), + .payload_type = payload_ty.toIntern(), + } })).toType(); +} + +pub fn singleErrorSetType(mod: *Module, name: []const u8) Allocator.Error!Type { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + return singleErrorSetTypeNts(mod, try ip.getOrPutString(gpa, name)); +} + +pub fn singleErrorSetTypeNts(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const names = [1]InternPool.NullTerminatedString{name}; + const i = try ip.get(gpa, .{ .error_set_type = .{ .names = &names } }); + return i.toType(); +} + +/// Sorts `names` in place. +pub fn errorSetFromUnsortedNames( + mod: *Module, + names: []InternPool.NullTerminatedString, +) Allocator.Error!Type { + std.mem.sort( + InternPool.NullTerminatedString, + names, + {}, + InternPool.NullTerminatedString.indexLessThan, + ); + const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } }); + return new_ty.toType(); +} + /// Supports optionals in addition to pointers. 
pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { @@ -7240,6 +7249,16 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { return mod.intern_pool.indexToFuncType(ty.ip_index); } +pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { + const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null; + return mod.inferredErrorSetPtr(index); +} + +pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex { + if (ty.ip_index == .none) return .none; + return mod.intern_pool.indexToInferredErrorSetType(ty.ip_index); +} + pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(owner_decl_index); diff --git a/src/Sema.zig b/src/Sema.zig index 74efe9d141fa..be505d74a3cf 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -825,12 +825,13 @@ pub fn analyzeBodyBreak( block: *Block, body: []const Zir.Inst.Index, ) CompileError!?BreakData { + const mod = sema.mod; const break_inst = sema.analyzeBodyInner(block, body) catch |err| switch (err) { error.ComptimeBreak => sema.comptime_break_inst, else => |e| return e, }; if (block.instructions.items.len != 0 and - sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn()) + sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn(mod)) return null; const break_data = sema.code.instructions.items(.data)[break_inst].@"break"; const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data; @@ -1701,7 +1702,7 @@ fn analyzeBodyInner( break :blk Air.Inst.Ref.void_value; }, }; - if (sema.typeOf(air_inst).isNoReturn()) + if (sema.typeOf(air_inst).isNoReturn(mod)) break always_noreturn; map.putAssumeCapacity(inst, air_inst); i += 1; @@ -1796,8 +1797,7 @@ fn analyzeAsType( const wanted_type = Type.type; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known"); - const ty = val.toType(); - return ty.copy(sema.arena); + return val.toType(); } pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { @@ -2004,7 +2004,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, - .const_ty => return try air_datas[i].ty.toValue(sema.arena), + .const_ty => return air_datas[i].ty.toValue(), .interned => return air_datas[i].interned.toValue(), else => return null, } @@ -2131,7 +2131,7 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec }; return sema.failWithOwnedErrorMsg(msg); } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: { - const child_ty = inner_ty.errorUnionPayload(); + const child_ty = inner_ty.errorUnionPayload(mod); if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err; const msg = msg: { const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); @@ -2473,7 +2473,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl_index = try anon_decl.finish( - try pointee_ty.copy(anon_decl.arena()), + pointee_ty, Value.undef, iac.data.alignment, ); @@ -3250,47 +3250,35 @@ fn zirErrorSetDecl( const tracy = 
trace(@src()); defer tracy.end(); + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index); - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const error_set = try new_decl_arena_allocator.create(Module.ErrorSet); - const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set); - const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty); - const mod = sema.mod; - const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = error_set_val, - }, name_strategy, "error", inst); - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); - - var names = Module.ErrorSet.NameMap{}; - try names.ensureUnusedCapacity(new_decl_arena_allocator, extra.data.fields_len); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; + try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len); var extra_index = @intCast(u32, extra.end); const extra_index_end = extra_index + (extra.data.fields_len * 2); while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const str_index = sema.code.extra[extra_index]; - const kv = try mod.getErrorValue(sema.code.nullTerminatedString(str_index)); - const result = names.getOrPutAssumeCapacity(kv.key); + const name = sema.code.nullTerminatedString(str_index); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen } - // names must be sorted. 
- Module.ErrorSet.sortNames(&names); + const error_set_ty = try mod.errorSetFromUnsortedNames(names.keys()); + + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ + .ty = Type.type, + .val = error_set_ty.toValue(), + }, name_strategy, "error", inst); + const new_decl = mod.declPtr(new_decl_index); + new_decl.owns_tv = true; + errdefer mod.abortAnonDecl(new_decl_index); - error_set.* = .{ - .owner_decl = new_decl_index, - .names = names, - }; - try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); } @@ -3407,7 +3395,7 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index else operand_ty; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; - const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(mod); + const payload_ty = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod); if (payload_ty != .Void and payload_ty != .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "error union payload is ignored", .{}); @@ -3590,7 +3578,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try elem_ty.copy(anon_decl.arena()), + elem_ty, try store_val.copy(anon_decl.arena()), ptr_info.@"align", )); @@ -3722,7 +3710,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const var_is_mut = switch (sema.typeOf(ptr).tag()) { .inferred_alloc_const => false, .inferred_alloc_mut => true, - else => unreachable, }; const target = sema.mod.getTarget(); @@ -3733,7 +3720,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); const decl = sema.mod.declPtr(decl_index); - const final_elem_ty = try decl.ty.copy(sema.arena); + const final_elem_ty = decl.ty; const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = final_elem_ty, .mutable = true, @@ -3833,7 +3820,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const new_decl_index = try anon_decl.finish( - try final_elem_ty.copy(anon_decl.arena()), + final_elem_ty, try store_val.copy(anon_decl.arena()), inferred_alloc.data.alignment, ); @@ -5042,7 +5029,7 @@ fn storeToInferredAllocComptime( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl_index = try anon_decl.finish( - try operand_ty.copy(anon_decl.arena()), + operand_ty, try operand_val.copy(anon_decl.arena()), iac.data.alignment, ); @@ -5286,6 +5273,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); @@ -5335,7 +5323,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError try sema.analyzeBody(&loop_block, body); const loop_block_len = loop_block.instructions.items.len; - if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn()) { + if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn(mod)) { // If the loop ended with a 
noreturn terminator, then there is no way for it to loop, // so we can just use the block instead. try child_block.instructions.appendSlice(gpa, loop_block.instructions.items); @@ -5588,7 +5576,7 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); + assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn(mod)); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions @@ -5755,7 +5743,7 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); break :blk try anon_decl.finish( - try operand.ty.copy(anon_decl.arena()), + operand.ty, try operand.val.copy(anon_decl.arena()), 0, ); @@ -6434,7 +6422,7 @@ fn zirCall( }; const return_ty = sema.typeOf(call_inst); - if (modifier != .always_tail and return_ty.isNoReturn()) + if (modifier != .always_tail and return_ty.isNoReturn(mod)) return call_inst; // call to "fn(...) noreturn", don't pop // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only @@ -6957,17 +6945,11 @@ fn analyzeCall( // Create a fresh inferred error set type for inline/comptime calls. const fn_ret_ty = blk: { if (module_fn.hasInferredErrorSet(mod)) { - const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); - node.data = .{ .func = module_fn }; - if (parent_func) |some| { - some.inferred_error_sets.prepend(node); - } - - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); - break :blk try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = bare_return_type, + const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ + .func = module_fn, }); + const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); + break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); } break :blk bare_return_type; }; @@ -7843,21 +7825,21 @@ fn resolveGenericInstantiationType( // `GenericCallAdapter.eql` as well as function body analysis. // Whether it is anytype is communicated by `isAnytypeParam`. const arg = child_sema.inst_map.get(inst).?; - const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator); + const arg_ty = child_sema.typeOf(arg); - if (try sema.typeRequiresComptime(copied_arg_ty)) { + if (try sema.typeRequiresComptime(arg_ty)) { is_comptime = true; } if (is_comptime) { const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?; child_sema.comptime_args[arg_i] = .{ - .ty = copied_arg_ty, + .ty = arg_ty, .val = try arg_val.copy(new_decl_arena_allocator), }; } else { child_sema.comptime_args[arg_i] = .{ - .ty = copied_arg_ty, + .ty = arg_ty, .val = Value.generic_poison, }; } @@ -7868,7 +7850,7 @@ fn resolveGenericInstantiationType( try wip_captures.finalize(); // Populate the Decl ty/val with the function and its type. - new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); + new_decl.ty = child_sema.typeOf(new_func_inst); // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. 
@@ -8068,7 +8050,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         });
     }
     try sema.validateErrorUnionPayloadType(block, payload, rhs_src);
-    const err_union_ty = try Type.errorUnion(sema.arena, error_set, payload, sema.mod);
+    const err_union_ty = try mod.errorUnionType(error_set, payload);
     return sema.addType(err_union_ty);
 }
@@ -8087,16 +8069,13 @@ fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, p
 
 fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     _ = block;
-    const tracy = trace(@src());
-    defer tracy.end();
-
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
-
-    // Create an anonymous error set type with only this error value, and return the value.
-    const kv = try sema.mod.getErrorValue(inst_data.get(sema.code));
-    const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key);
+    const name = inst_data.get(sema.code);
+    // Create an error set type with only this error value, and return the value.
+    const kv = try sema.mod.getErrorValue(name);
     return sema.addConstant(
-        result_type,
+        try mod.singleErrorSetType(kv.key),
         try Value.Tag.@"error".create(sema.arena, .{
             .name = kv.key,
         }),
@@ -8139,11 +8118,14 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
     const op_ty = sema.typeOf(uncasted_operand);
     try sema.resolveInferredErrorSetTy(block, src, op_ty);
 
-    if (!op_ty.isAnyError()) {
-        const names = op_ty.errorSetNames();
+    if (!op_ty.isAnyError(mod)) {
+        const names = op_ty.errorSetNames(mod);
         switch (names.len) {
             0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)),
-            1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?),
+            1 => {
+                const name = mod.intern_pool.stringToSlice(names[0]);
+                return sema.addIntUnsigned(Type.err_int, mod.global_error_set.get(name).?);
+            },
             else => {},
         }
     }
@@ -8224,22 +8206,22 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         return Air.Inst.Ref.anyerror_type;
     }
 
-    if (lhs_ty.castTag(.error_set_inferred)) |payload| {
-        try sema.resolveInferredErrorSet(block, src, payload.data);
+    if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| {
+        try sema.resolveInferredErrorSet(block, src, ies_index);
         // isAnyError might have changed from a false negative to a true positive after resolution.
-        if (lhs_ty.isAnyError()) {
+        if (lhs_ty.isAnyError(mod)) {
            return Air.Inst.Ref.anyerror_type;
        }
    }
 
-    if (rhs_ty.castTag(.error_set_inferred)) |payload| {
-        try sema.resolveInferredErrorSet(block, src, payload.data);
+    if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| {
+        try sema.resolveInferredErrorSet(block, src, ies_index);
        // isAnyError might have changed from a false negative to a true positive after resolution.
-        if (rhs_ty.isAnyError()) {
+        if (rhs_ty.isAnyError(mod)) {
            return Air.Inst.Ref.anyerror_type;
        }
    }
 
-    const err_set_ty = try lhs_ty.errorSetMerge(sema.arena, rhs_ty);
+    const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty);
     return sema.addType(err_set_ty);
 }
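Error names become interned `NullTerminatedString`s rather than byte slices, so call sites that still need the text (such as the `global_error_set` lookup above) convert at the edge. The round trip, using the two InternPool calls this patch relies on:

    // Intern a name, then recover the bytes for display or legacy maps.
    const name_ip = try mod.intern_pool.getOrPutString(gpa, "OutOfMemory"); // example name
    const name: []const u8 = mod.intern_pool.stringToSlice(name_ip);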
@@ -8484,7 +8466,7 @@ fn zirOptionalPayload(
         if (true) break :t operand_ty;
         const ptr_info = operand_ty.ptrInfo(mod);
         break :t try Type.ptr(sema.arena, sema.mod, .{
-            .pointee_type = try ptr_info.pointee_type.copy(sema.arena),
+            .pointee_type = ptr_info.pointee_type,
             .@"align" = ptr_info.@"align",
             .@"addrspace" = ptr_info.@"addrspace",
             .mutable = ptr_info.mutable,
@@ -8547,7 +8529,7 @@ fn analyzeErrUnionPayload(
     safety_check: bool,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
-    const payload_ty = err_union_ty.errorUnionPayload();
+    const payload_ty = err_union_ty.errorUnionPayload(mod);
     if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
         if (val.getError()) |name| {
             return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
@@ -8560,7 +8542,7 @@ fn analyzeErrUnionPayload(
 
     // If the error set has no fields then no safety check is needed.
     if (safety_check and block.wantSafety() and
-        !err_union_ty.errorUnionSet().errorSetIsEmpty(mod))
+        !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
     {
         try sema.panicUnwrapError(block, operand, .unwrap_errunion_err, .is_non_err);
     }
@@ -8603,7 +8585,7 @@ fn analyzeErrUnionPayloadPtr(
     }
 
     const err_union_ty = operand_ty.childType(mod);
-    const payload_ty = err_union_ty.errorUnionPayload();
+    const payload_ty = err_union_ty.errorUnionPayload(mod);
     const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = payload_ty,
         .mutable = !operand_ty.isConstPtr(mod),
@@ -8646,7 +8628,7 @@ fn analyzeErrUnionPayloadPtr(
 
     // If the error set has no fields then no safety check is needed.
     if (safety_check and block.wantSafety() and
-        !err_union_ty.errorUnionSet().errorSetIsEmpty(mod))
+        !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod))
     {
         try sema.panicUnwrapError(block, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr);
     }
@@ -8678,7 +8660,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air
         });
     }
 
-    const result_ty = operand_ty.errorUnionSet();
+    const result_ty = operand_ty.errorUnionSet(mod);
 
     if (try sema.resolveDefinedValue(block, src, operand)) |val| {
         assert(val.getError() != null);
@@ -8707,7 +8689,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
         });
     }
 
-    const result_ty = operand_ty.childType(mod).errorUnionSet();
+    const result_ty = operand_ty.childType(mod).errorUnionSet(mod);
 
     if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
         if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
@@ -8755,7 +8737,7 @@ fn zirFunc(
             extra_index += ret_ty_body.len;
 
             const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known");
-            break :blk try ret_ty_val.toType().copy(sema.arena);
+            break :blk ret_ty_val.toType();
         },
     };
@@ -8927,6 +8909,7 @@ fn funcCommon(
     is_noinline: bool,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    const gpa = sema.gpa;
     const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
     const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset };
     const func_src = LazySrcLoc.nodeOffset(src_node_offset);
@@ -8955,16 +8938,12 @@ fn funcCommon(
             break :new_func new_func;
         }
         destroy_fn_on_error = true;
-        const new_func = try sema.gpa.create(Module.Fn);
+        const new_func = try gpa.create(Module.Fn);
         // Set this here so that the inferred return type can be printed correctly if it appears in an error.
         new_func.owner_decl = sema.owner_decl_index;
         break :new_func new_func;
     };
-    errdefer if (destroy_fn_on_error) sema.gpa.destroy(new_func);
-
-    var maybe_inferred_error_set_node: ?*Module.Fn.InferredErrorSetListNode = null;
-    errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node);
-    // Note: no need to errdefer since this will still be in its default state at the end of the function.
+    errdefer if (destroy_fn_on_error) gpa.destroy(new_func);
 
     const target = sema.mod.getTarget();
     const fn_ty: Type = fn_ty: {
@@ -9027,15 +9006,11 @@ fn funcCommon(
             bare_return_type
         else blk: {
             try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
-            const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode);
-            node.data = .{ .func = new_func };
-            maybe_inferred_error_set_node = node;
-
-            const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data);
-            break :blk try Type.Tag.error_union.create(sema.arena, .{
-                .error_set = error_set_ty,
-                .payload = bare_return_type,
+            const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
+                .func = new_func,
             });
+            const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
+            break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
         };
 
         if (!return_type.isValidReturnType(mod)) {
@@ -9044,7 +9019,7 @@ fn funcCommon(
                 const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
                     opaque_str, return_type.fmt(sema.mod),
                 });
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
 
                 try sema.addDeclaredHereNote(msg, return_type);
                 break :msg msg;
@@ -9058,7 +9033,7 @@ fn funcCommon(
                 const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
                     return_type.fmt(sema.mod), @tagName(cc_resolved),
                 });
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
 
                 const src_decl = sema.mod.declPtr(block.src_decl);
                 try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty);
@@ -9182,8 +9157,8 @@ fn funcCommon(
     sema.owner_decl.@"addrspace" = address_space orelse .generic;
 
     if (is_extern) {
-        const new_extern_fn = try sema.gpa.create(Module.ExternFn);
-        errdefer sema.gpa.destroy(new_extern_fn);
+        const new_extern_fn = try gpa.create(Module.ExternFn);
+        errdefer gpa.destroy(new_extern_fn);
 
         new_extern_fn.* = Module.ExternFn{
             .owner_decl = sema.owner_decl_index,
@@ -9232,10 +9207,6 @@ fn funcCommon(
         .branch_quota = default_branch_quota,
         .is_noinline = is_noinline,
     };
-    if (maybe_inferred_error_set_node) |node| {
-        new_func.inferred_error_sets.prepend(node);
-    }
-    maybe_inferred_error_set_node = null;
     fn_payload.* = .{
         .base = .{ .tag = .function },
         .data = new_func,
@@ -10139,6 +10110,7 @@ fn zirSwitchCapture(
     defer tracy.end();
 
     const mod = sema.mod;
+    const gpa = sema.gpa;
     const zir_datas = sema.code.instructions.items(.data);
     const capture_info = zir_datas[inst].switch_capture;
     const switch_info = zir_datas[capture_info.switch_inst].pl_node;
@@ -10248,7 +10220,7 @@ fn zirSwitchCapture(
                 const capture_src = raw_capture_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first);
 
                 const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
 
                 const raw_first_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 0 } };
                 const first_item_src = raw_first_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first);
@@ -10294,20 +10266,16 @@ fn zirSwitchCapture(
         },
         .ErrorSet => {
             if (is_multi) {
-                var names: Module.ErrorSet.NameMap = .{};
+                var names: Module.Fn.InferredErrorSet.NameMap = .{};
                 try names.ensureUnusedCapacity(sema.arena, items.len);
                 for (items) |item| {
                     const item_ref = try sema.resolveInst(item);
                     // Previous switch validation ensured this will succeed
                     const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
-                    names.putAssumeCapacityNoClobber(
-                        item_val.getError().?,
-                        {},
-                    );
+                    const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError().?);
+                    names.putAssumeCapacityNoClobber(name_ip, {});
                 }
-                // names must be sorted
-                Module.ErrorSet.sortNames(&names);
 
-                const else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names);
+                const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
                 return sema.bitCast(block, else_error_ty, operand, operand_src, null);
             } else {
@@ -10315,7 +10283,7 @@ fn zirSwitchCapture(
                 // Previous switch validation ensured this will succeed
                 const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
 
-                const item_ty = try Type.Tag.error_set_single.create(sema.arena, item_val.getError().?);
+                const item_ty = try mod.singleErrorSetType(item_val.getError().?);
                 return sema.bitCast(block, item_ty, operand, operand_src, null);
             }
         },
@@ -10678,7 +10646,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 
             try sema.resolveInferredErrorSetTy(block, src, operand_ty);
 
-            if (operand_ty.isAnyError()) {
+            if (operand_ty.isAnyError(mod)) {
                 if (special_prong != .@"else") {
                     return sema.fail(
                         block,
@@ -10692,7 +10660,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 var maybe_msg: ?*Module.ErrorMsg = null;
                 errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
 
-                for (operand_ty.errorSetNames()) |error_name| {
+                for (operand_ty.errorSetNames(mod)) |error_name_ip| {
+                    const error_name = mod.intern_pool.stringToSlice(error_name_ip);
                     if (!seen_errors.contains(error_name) and special_prong != .@"else") {
                         const msg = maybe_msg orelse blk: {
                             maybe_msg = try sema.errMsg(
@@ -10720,7 +10689,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     return sema.failWithOwnedErrorMsg(msg);
                 }
 
-                if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames().len) {
+                if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) {
                     // In order to enable common patterns for generic code allow simple else bodies
                     // else => unreachable,
                     // else => return,
@@ -10757,18 +10726,18 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     );
                 }
 
-                const error_names = operand_ty.errorSetNames();
-                var names: Module.ErrorSet.NameMap = .{};
+                const error_names = operand_ty.errorSetNames(mod);
+                var names: Module.Fn.InferredErrorSet.NameMap = .{};
                 try names.ensureUnusedCapacity(sema.arena, error_names.len);
-                for (error_names) |error_name| {
+                for (error_names) |error_name_ip| {
+                    const error_name = mod.intern_pool.stringToSlice(error_name_ip);
                     if (seen_errors.contains(error_name)) continue;
 
-                    names.putAssumeCapacityNoClobber(error_name, {});
+                    names.putAssumeCapacityNoClobber(error_name_ip, {});
                 }
-
-                // names must be sorted
-                Module.ErrorSet.sortNames(&names);
-                else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names);
+                // No need to keep the hash map metadata correct; here we
+                // extract the (sorted) keys only.
+                else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
             }
         },
         .Int, .ComptimeInt => {
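Where callers previously sorted with `Module.ErrorSet.sortNames` and built an `error_set_merged` type by hand, they now pass whatever key order they have to the Module and get back a canonical interned type. Usage sketch (helper and NameMap names as used above; the two error names are placeholders):

    var names: Module.Fn.InferredErrorSet.NameMap = .{};
    try names.ensureUnusedCapacity(sema.arena, 2);
    names.putAssumeCapacityNoClobber(try mod.intern_pool.getOrPutString(gpa, "A"), {});
    names.putAssumeCapacityNoClobber(try mod.intern_pool.getOrPutString(gpa, "B"), {});
    const set_ty = try mod.errorSetFromUnsortedNames(names.keys());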
@@ -11513,12 +11482,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 }
             },
             .ErrorSet => {
-                if (operand_ty.isAnyError()) {
+                if (operand_ty.isAnyError(mod)) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
                         operand_ty.fmt(mod),
                     });
                 }
-                for (operand_ty.errorSetNames()) |error_name| {
+                for (operand_ty.errorSetNames(mod)) |error_name_ip| {
+                    const error_name = mod.intern_pool.stringToSlice(error_name_ip);
                     if (seen_errors.contains(error_name)) continue;
                     cases_len += 1;
@@ -11931,7 +11901,8 @@ fn validateSwitchNoRange(
 }
 
 fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !bool {
-    if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) return false;
+    const mod = sema.mod;
+    if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;
 
     const tags = sema.code.instructions.items(.tag);
     for (body) |inst| {
@@ -11967,7 +11938,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
             .as_node => try sema.zirAsNode(block, inst),
             .field_val => try sema.zirFieldVal(block, inst),
             .@"unreachable" => {
-                if (!sema.mod.comp.formatted_panics) {
+                if (!mod.comp.formatted_panics) {
                     try sema.safetyPanic(block, .unwrap_error);
                     return true;
                 }
@@ -11990,7 +11961,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
             },
             else => unreachable,
         };
-        if (sema.typeOf(air_inst).isNoReturn())
+        if (sema.typeOf(air_inst).isNoReturn(mod))
             return true;
         sema.inst_map.putAssumeCapacity(inst, air_inst);
     }
@@ -12194,13 +12165,14 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 }
 
 fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
     const err_name = inst_data.get(sema.code);
 
     // Return the error code from the function.
-    const kv = try sema.mod.getErrorValue(err_name);
+    const kv = try mod.getErrorValue(err_name);
     const result_inst = try sema.addConstant(
-        try Type.Tag.error_set_single.create(sema.arena, kv.key),
+        try mod.singleErrorSetType(kv.key),
         try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
     );
     return result_inst;
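Single-error sets get the same treatment: instead of `Type.Tag.error_set_single.create(sema.arena, name)`, the Module interns the set. Judging from the call sites in this patch there are two variants, one taking the name as bytes and one taking an already-interned name (sketch only):

    const ty_a = try mod.singleErrorSetType(name_bytes); // name_bytes: []const u8
    const ty_b = try mod.singleErrorSetTypeNts(name_ip); // name_ip: already interned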
@@ -15737,7 +15709,7 @@ fn zirClosureCapture(
         Value.@"unreachable";
 
     try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{
-        .ty = try sema.typeOf(operand).copy(sema.perm_arena),
+        .ty = sema.typeOf(operand),
         .val = try val.copy(sema.perm_arena),
     });
 }
@@ -16223,10 +16195,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index);
                 try sema.ensureDeclAnalyzed(set_field_ty_decl_index);
                 const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index);
-                break :t try set_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
+                break :t set_field_ty_decl.val.toType();
             };
 
-            try sema.queueFullTypeResolution(try error_field_ty.copy(sema.arena));
+            try sema.queueFullTypeResolution(error_field_ty);
 
             // If the error set is inferred it must be resolved at this point
             try sema.resolveInferredErrorSetTy(block, src, ty);
@@ -16234,11 +16206,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             // Build our list of Error values
             // Optional value is only null if anyerror
             // Value can be zero-length slice otherwise
-            const error_field_vals: ?[]Value = if (ty.isAnyError()) null else blk: {
-                const names = ty.errorSetNames();
+            const error_field_vals: ?[]Value = if (ty.isAnyError(mod)) null else blk: {
+                const names = ty.errorSetNames(mod);
                 const vals = try fields_anon_decl.arena().alloc(Value, names.len);
-                for (vals, 0..) |*field_val, i| {
-                    const name = names[i];
+                for (vals, names) |*field_val, name_ip| {
+                    const name = mod.intern_pool.stringToSlice(name_ip);
                     const name_val = v: {
                         var anon_decl = try block.startAnonDecl();
                         defer anon_decl.deinit();
@@ -16301,9 +16273,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .ErrorUnion => {
             const field_values = try sema.arena.alloc(Value, 2);
             // error_set: type,
-            field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet());
+            field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet(mod));
             // payload: type,
-            field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload());
+            field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload(mod));
 
             return sema.addConstant(
                 type_info_ty,
@@ -16332,7 +16304,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index);
                 try sema.ensureDeclAnalyzed(enum_field_ty_decl_index);
                 const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index);
-                break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
+                break :t enum_field_ty_decl.val.toType();
             };
 
             const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len);
@@ -16416,7 +16388,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index);
                 try sema.ensureDeclAnalyzed(union_field_ty_decl_index);
                 const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index);
-                break :t try union_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
+                break :t union_field_ty_decl.val.toType();
             };
 
             const union_ty = try sema.resolveTypeFields(ty);
@@ -16523,7 +16495,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index);
                 try sema.ensureDeclAnalyzed(struct_field_ty_decl_index);
                 const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index);
-                break :t try struct_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
+                break :t struct_field_ty_decl.val.toType();
             };
             const struct_ty = try sema.resolveTypeFields(ty);
             try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
@@ -16733,9 +16705,9 @@ fn typeInfoDecls(
         try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
         try sema.ensureDeclAnalyzed(declaration_ty_decl_index);
         const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index);
-        break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena());
+        break :t declaration_ty_decl.val.toType();
     };
-    try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena));
+    try sema.queueFullTypeResolution(declaration_ty);
 
     var decl_vals = std.ArrayList(Value).init(sema.gpa);
     defer decl_vals.deinit();
@@ -17018,12 +16990,12 @@ fn zirBoolBr(
     _ = try lhs_block.addBr(block_inst, lhs_result);
 
     const rhs_result = try sema.resolveBody(rhs_block, body, inst);
-    if (!sema.typeOf(rhs_result).isNoReturn()) {
+    if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
         _ = try rhs_block.addBr(block_inst, rhs_result);
     }
 
     const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst);
-    if (!sema.typeOf(rhs_result).isNoReturn()) {
+    if (!sema.typeOf(rhs_result).isNoReturn(mod)) {
         if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| {
             if (is_bool_or and rhs_val.toBool(mod)) {
                 return Air.Inst.Ref.bool_true;
@@ -17211,7 +17183,7 @@ fn zirCondbr(
         const err_operand = try sema.resolveInst(err_inst_data.operand);
         const operand_ty = sema.typeOf(err_operand);
         assert(operand_ty.zigTypeTag(mod) == .ErrorUnion);
-        const result_ty = operand_ty.errorUnionSet();
+        const result_ty = operand_ty.errorUnionSet(mod);
         break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand);
     };
@@ -17318,7 +17290,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
     const operand_ty = sema.typeOf(operand);
     const ptr_info = operand_ty.ptrInfo(mod);
     const res_ty = try Type.ptr(sema.arena, sema.mod, .{
-        .pointee_type = err_union_ty.errorUnionPayload(),
+        .pointee_type = err_union_ty.errorUnionPayload(mod),
         .@"addrspace" = ptr_info.@"addrspace",
         .mutable = ptr_info.mutable,
         .@"allowzero" = ptr_info.@"allowzero",
@@ -17414,14 +17386,15 @@ fn zirRetErrValue(
     block: *Block,
     inst: Zir.Inst.Index,
 ) CompileError!Zir.Inst.Index {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
     const err_name = inst_data.get(sema.code);
     const src = inst_data.src();
 
     // Return the error code from the function.
-    const kv = try sema.mod.getErrorValue(err_name);
+    const kv = try mod.getErrorValue(err_name);
     const result_inst = try sema.addConstant(
-        try Type.Tag.error_set_single.create(sema.arena, kv.key),
+        try mod.singleErrorSetType(err_name),
         try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
     );
     return sema.analyzeRet(block, result_inst, src);
@@ -17632,17 +17605,15 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
 
 fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
     const mod = sema.mod;
+    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
     assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
 
-    if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
+    if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| {
         const op_ty = sema.typeOf(uncasted_operand);
         switch (op_ty.zigTypeTag(mod)) {
-            .ErrorSet => {
-                try payload.data.addErrorSet(sema.gpa, op_ty);
-            },
-            .ErrorUnion => {
-                try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet());
-            },
+            .ErrorSet => try ies.addErrorSet(op_ty, ip, gpa),
+            .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, gpa),
            else => {},
        }
    }
@@ -18521,7 +18492,7 @@ fn addConstantMaybeRef(
     var anon_decl = try block.startAnonDecl();
     defer anon_decl.deinit();
     const decl = try anon_decl.finish(
-        try ty.copy(anon_decl.arena()),
+        ty,
         try val.copy(anon_decl.arena()),
         0, // default alignment
     );
@@ -18595,7 +18566,7 @@ fn fieldType(
                     continue;
                 },
                 .ErrorUnion => {
-                    cur_ty = cur_ty.errorUnionPayload();
+                    cur_ty = cur_ty.errorUnionPayload(mod);
                     continue;
                 },
                 else => {},
@@ -18641,7 +18612,7 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ty = try sema.resolveType(block, operand_src, inst_data.operand);
-    if (ty.isNoReturn()) {
+    if (ty.isNoReturn(mod)) {
         return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)});
     }
     const val = try ty.lazyAbiAlignment(mod, sema.arena);
@@ -18929,7 +18900,7 @@ fn zirReify(
                 const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data;
                 const ptr_ty = try Type.ptr(sema.arena, mod, .{
                     .@"addrspace" = .generic,
-                    .pointee_type = try elem_ty.copy(sema.arena),
+                    .pointee_type = elem_ty,
                 });
                 const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
                 break :s sent_val.toIntern();
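`InferredErrorSet.addErrorSet` now receives the InternPool and allocator explicitly, since the set stores interned names rather than arena-allocated slices. The call shape used in addToInferredErrorSet above, as a sketch:

    if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| {
        // op_ty may be an error set or the set side of an error union.
        try ies.addErrorSet(op_ty, &mod.intern_pool, sema.gpa);
    }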
@@ -18993,7 +18964,7 @@ fn zirReify(
             const sentinel_val = struct_val[2];
 
             const len = len_val.toUnsignedInt(mod);
-            const child_ty = try child_val.toType().copy(sema.arena);
+            const child_ty = child_val.toType();
             const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: {
                 const ptr_ty = try Type.ptr(sema.arena, mod, .{
                     .@"addrspace" = .generic,
@@ -19011,7 +18982,7 @@ fn zirReify(
             // child: type,
             const child_val = struct_val[0];
 
-            const child_ty = try child_val.toType().copy(sema.arena);
+            const child_ty = child_val.toType();
 
             const ty = try Type.optional(sema.arena, child_ty, mod);
             return sema.addType(ty);
@@ -19024,17 +18995,14 @@ fn zirReify(
             // payload: type,
             const payload_val = struct_val[1];
 
-            const error_set_ty = try error_set_val.toType().copy(sema.arena);
-            const payload_ty = try payload_val.toType().copy(sema.arena);
+            const error_set_ty = error_set_val.toType();
+            const payload_ty = payload_val.toType();
 
             if (error_set_ty.zigTypeTag(mod) != .ErrorSet) {
                 return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{});
             }
 
-            const ty = try Type.Tag.error_union.create(sema.arena, .{
-                .error_set = error_set_ty,
-                .payload = payload_ty,
-            });
+            const ty = try mod.errorUnionType(error_set_ty, payload_ty);
             return sema.addType(ty);
         },
         .ErrorSet => {
@@ -19043,27 +19011,23 @@ fn zirReify(
             const slice_val = payload_val.castTag(.slice).?.data;
 
             const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod));
-            var names: Module.ErrorSet.NameMap = .{};
+            var names: Module.Fn.InferredErrorSet.NameMap = .{};
             try names.ensureUnusedCapacity(sema.arena, len);
-            var i: usize = 0;
-            while (i < len) : (i += 1) {
+            for (0..len) |i| {
                 const elem_val = try slice_val.ptr.elemValue(mod, i);
                 const struct_val = elem_val.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // error_set: type,
                 const name_val = struct_val[0];
                 const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
-
-                const kv = try mod.getErrorValue(name_str);
-                const gop = names.getOrPutAssumeCapacity(kv.key);
+                const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str);
+                const gop = names.getOrPutAssumeCapacity(name_ip);
                 if (gop.found_existing) {
                     return sema.fail(block, src, "duplicate error '{s}'", .{name_str});
                 }
             }
 
-            // names must be sorted
-            Module.ErrorSet.sortNames(&names);
-            const ty = try Type.Tag.error_set_merged.create(sema.arena, names);
+            const ty = try mod.errorSetFromUnsortedNames(names.keys());
             return sema.addType(ty);
         },
         .Struct => {
@@ -19378,7 +19342,7 @@ fn zirReify(
                     return sema.fail(block, src, "duplicate union field {s}", .{field_name});
                 }
 
-                const field_ty = try type_val.toType().copy(new_decl_arena_allocator);
+                const field_ty = type_val.toType();
                 gop.value_ptr.* = .{
                     .ty = field_ty,
                     .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?),
@@ -19673,7 +19637,7 @@ fn reifyStruct(
             return sema.fail(block, src, "comptime field without default initialization value", .{});
         }
 
-        const field_ty = try type_val.toType().copy(new_decl_arena_allocator);
+        const field_ty = type_val.toType();
         gop.value_ptr.* = .{
             .ty = field_ty,
             .abi_align = abi_align,
@@ -19751,7 +19715,7 @@ fn reifyStruct(
         if (backing_int_val.optionalValue(mod)) |payload| {
             const backing_int_ty = payload.toType();
             try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
-            struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator);
+            struct_obj.backing_int_ty = backing_int_ty;
         } else {
             struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
         }
@@ -20035,6 +19999,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20050,22 +20016,27 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
 
     if (disjoint: {
         // Try avoiding resolving inferred error sets if we can
-        if (!dest_ty.isAnyError() and dest_ty.errorSetNames().len == 0) break :disjoint true;
-        if (!operand_ty.isAnyError() and operand_ty.errorSetNames().len == 0) break :disjoint true;
-        if (dest_ty.isAnyError()) break :disjoint false;
-        if (operand_ty.isAnyError()) break :disjoint false;
-        for (dest_ty.errorSetNames()) |dest_err_name|
-            if (operand_ty.errorSetHasField(dest_err_name))
+        if (!dest_ty.isAnyError(mod) and dest_ty.errorSetNames(mod).len == 0) break :disjoint true;
+        if (!operand_ty.isAnyError(mod) and operand_ty.errorSetNames(mod).len == 0) break :disjoint true;
+        if (dest_ty.isAnyError(mod)) break :disjoint false;
+        if (operand_ty.isAnyError(mod)) break :disjoint false;
+        for (dest_ty.errorSetNames(mod)) |dest_err_name| {
+            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                 break :disjoint false;
+        }
 
-        if (dest_ty.tag() != .error_set_inferred and operand_ty.tag() != .error_set_inferred)
+        if (!ip.isInferredErrorSetType(dest_ty.ip_index) and
+            !ip.isInferredErrorSetType(operand_ty.ip_index))
+        {
             break :disjoint true;
+        }
 
         try sema.resolveInferredErrorSetTy(block, dest_ty_src, dest_ty);
         try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty);
-        for (dest_ty.errorSetNames()) |dest_err_name|
-            if (operand_ty.errorSetHasField(dest_err_name))
+        for (dest_ty.errorSetNames(mod)) |dest_err_name| {
+            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                 break :disjoint false;
+        }
 
         break :disjoint true;
     }) {
@@ -20085,9 +20056,9 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
     }
 
     if (maybe_operand_val) |val| {
-        if (!dest_ty.isAnyError()) {
+        if (!dest_ty.isAnyError(mod)) {
             const error_name = val.castTag(.@"error").?.data.name;
-            if (!dest_ty.errorSetHasField(error_name)) {
+            if (!dest_ty.errorSetHasField(error_name, mod)) {
                 const msg = msg: {
                     const msg = try sema.errMsg(
                         block,
@@ -20107,7 +20078,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
     }
 
     try sema.requireRuntimeBlock(block, src, operand_src);
-    if (block.wantSafety() and !dest_ty.isAnyError() and sema.mod.backendSupportsFeature(.error_set_has_value)) {
+    if (block.wantSafety() and !dest_ty.isAnyError(mod) and sema.mod.backendSupportsFeature(.error_set_has_value)) {
         const err_int_inst = try block.addBitCast(Type.err_int, operand);
         const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst);
         try sema.addSafetyCheck(block, ok, .invalid_error_code);
@@ -22862,7 +22833,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         extra_index += body.len;
 
         const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, "return type must be comptime-known");
-        const ty = try val.toType().copy(sema.arena);
+        const ty = val.toType();
         break :blk ty;
     } else if (extra.data.bits.has_ret_ty_ref) blk: {
         const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
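The disjointness test above shows the new membership primitive: `Type.errorSetHasFieldIp` operates directly on InternPool indices, so comparing two explicit error sets never has to materialize a Type. Sketch of the loop shape (same calls as above):

    const ip = &mod.intern_pool;
    for (dest_ty.errorSetNames(mod)) |dest_err_name| {
        // Both arguments are interned: the operand type's index and an interned name.
        if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name)) {
            // the two sets overlap in at least this error
        }
    }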
@@ -22873,7 +22844,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             },
             else => |e| return e,
         };
-        const ty = try ret_ty_tv.val.toType().copy(sema.arena);
+        const ty = ret_ty_tv.val.toType();
         break :blk ty;
     } else Type.void;
@@ -23360,7 +23331,7 @@ fn validateRunTimeType(
         },
 
         .Array, .Vector => ty = ty.childType(mod),
-        .ErrorUnion => ty = ty.errorUnionPayload(),
+        .ErrorUnion => ty = ty.errorUnionPayload(mod),
 
         .Struct, .Union => {
             const resolved_ty = try sema.resolveTypeFields(ty);
@@ -23452,7 +23423,7 @@ fn explainWhyTypeIsComptimeInner(
             try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set);
         },
         .ErrorUnion => {
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set);
        },
 
         .Struct => {
@@ -24065,7 +24036,9 @@ fn fieldVal(
     // in `fieldPtr`. This function takes a value and returns a value.
 
     const mod = sema.mod;
+    const gpa = sema.gpa;
     const arena = sema.arena;
+    const ip = &mod.intern_pool;
     const object_src = src; // TODO better source location
     const object_ty = sema.typeOf(object);
@@ -24147,27 +24120,33 @@ fn fieldVal(
 
             switch (try child_type.zigTypeTagOrPoison(mod)) {
                 .ErrorSet => {
-                    const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
-                        if (payload.data.names.getEntry(field_name)) |entry| {
-                            break :blk entry.key_ptr.*;
-                        }
-                        const msg = msg: {
-                            const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{
-                                field_name, child_type.fmt(mod),
-                            });
-                            errdefer msg.destroy(sema.gpa);
-                            try sema.addDeclaredHereNote(msg, child_type);
-                            break :msg msg;
-                        };
-                        return sema.failWithOwnedErrorMsg(msg);
-                    } else (try mod.getErrorValue(field_name)).key;
+                    const name = try ip.getOrPutString(gpa, field_name);
+                    switch (ip.indexToKey(child_type.ip_index)) {
+                        .error_set_type => |error_set_type| blk: {
+                            if (error_set_type.nameIndex(ip, name) != null) break :blk;
+                            const msg = msg: {
+                                const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{
+                                    field_name, child_type.fmt(mod),
+                                });
+                                errdefer msg.destroy(sema.gpa);
+                                try sema.addDeclaredHereNote(msg, child_type);
+                                break :msg msg;
+                            };
+                            return sema.failWithOwnedErrorMsg(msg);
+                        },
+                        .inferred_error_set_type => {
+                            return sema.fail(block, src, "TODO handle inferred error sets here", .{});
+                        },
+                        .simple_type => |t| assert(t == .anyerror),
+                        else => unreachable,
+                    }
 
                     return sema.addConstant(
-                        if (!child_type.isAnyError())
-                            try child_type.copy(arena)
+                        if (!child_type.isAnyError(mod))
+                            child_type
                         else
-                            try Type.Tag.error_set_single.create(arena, name),
-                        try Value.Tag.@"error".create(arena, .{ .name = name }),
+                            try mod.singleErrorSetTypeNts(name),
+                        try Value.Tag.@"error".create(arena, .{ .name = ip.stringToSlice(name) }),
                     );
                 },
                 .Union => {
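The `castTag(.error_set)` probes are replaced by decoding the interned index into a key and switching on it; `anyerror` now surfaces as a `simple_type`. The inspection template introduced in fieldVal above (and reused in fieldPtr next):

    switch (ip.indexToKey(child_type.ip_index)) {
        .error_set_type => |error_set_type| {
            // explicit set: membership via error_set_type.nameIndex(ip, name)
        },
        .inferred_error_set_type => {
            // not handled at these call sites yet (TODO in this patch)
        },
        .simple_type => |t| assert(t == .anyerror),
        else => unreachable,
    }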
@@ -24252,6 +24231,8 @@ fn fieldPtr(
     // in `fieldVal`. This function takes a pointer and returns a pointer.
 
     const mod = sema.mod;
+    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
     const object_ptr_src = src; // TODO better source location
     const object_ptr_ty = sema.typeOf(object_ptr);
     const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
@@ -24362,24 +24343,33 @@ fn fieldPtr(
 
             switch (child_type.zigTypeTag(mod)) {
                 .ErrorSet => {
-                    // TODO resolve inferred error sets
-                    const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
-                        if (payload.data.names.getEntry(field_name)) |entry| {
-                            break :blk entry.key_ptr.*;
-                        }
-                        return sema.fail(block, src, "no error named '{s}' in '{}'", .{
-                            field_name, child_type.fmt(mod),
-                        });
-                    } else (try mod.getErrorValue(field_name)).key;
+                    const name = try ip.getOrPutString(gpa, field_name);
+                    switch (ip.indexToKey(child_type.ip_index)) {
+                        .error_set_type => |error_set_type| blk: {
+                            if (error_set_type.nameIndex(ip, name) != null) {
+                                break :blk;
+                            }
+                            return sema.fail(block, src, "no error named '{s}' in '{}'", .{
+                                field_name, child_type.fmt(mod),
+                            });
+                        },
+                        .inferred_error_set_type => {
+                            return sema.fail(block, src, "TODO handle inferred error sets here", .{});
+                        },
+                        .simple_type => |t| assert(t == .anyerror),
+                        else => unreachable,
+                    }
 
                     var anon_decl = try block.startAnonDecl();
                     defer anon_decl.deinit();
                     return sema.analyzeDeclRef(try anon_decl.finish(
-                        if (!child_type.isAnyError())
-                            try child_type.copy(anon_decl.arena())
+                        if (!child_type.isAnyError(mod))
+                            child_type
                         else
-                            try Type.Tag.error_set_single.create(anon_decl.arena(), name),
-                        try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }),
+                            try mod.singleErrorSetTypeNts(name),
+                        try Value.Tag.@"error".create(anon_decl.arena(), .{
+                            .name = ip.stringToSlice(name),
+                        }),
                         0, // default alignment
                     ));
                 },
@@ -24589,7 +24579,7 @@ fn fieldCallBind(
                     } };
                 }
             } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and
-                first_param_type.errorUnionPayload().eql(concrete_ty, mod))
+                first_param_type.errorUnionPayload(mod).eql(concrete_ty, mod))
             {
                 const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                 return .{ .method = .{
@@ -24832,7 +24822,7 @@ fn structFieldPtrByIndex(
 
     if (field.is_comptime) {
         const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{
-            .field_ty = try field.ty.copy(sema.arena),
+            .field_ty = field.ty,
             .field_val = try field.default_val.copy(sema.arena),
         });
         return sema.addConstant(ptr_field_ty, val);
@@ -26227,7 +26217,7 @@ fn coerceExtra(
                 .none => switch (inst_val.tag()) {
                     .eu_payload => {
                         const payload = try sema.addConstant(
-                            inst_ty.errorUnionPayload(),
+                            inst_ty.errorUnionPayload(mod),
                            inst_val.castTag(.eu_payload).?.data,
                        );
                        return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) {
@@ -26240,7 +26230,7 @@ fn coerceExtra(
                 else => {},
             }
             const error_set = try sema.addConstant(
-                inst_ty.errorUnionSet(),
+                inst_ty.errorUnionSet(mod),
                 inst_val,
             );
             return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src);
@@ -26342,7 +26332,7 @@ fn coerceExtra(
 
             // E!T to T
             if (inst_ty.zigTypeTag(mod) == .ErrorUnion and
-                (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
+                (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
             {
                 try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
                 try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
@@ -26393,7 +26383,7 @@ const InMemoryCoercionResult = union(enum) {
     optional_shape: Pair,
     optional_child: PairAndChild,
     from_anyerror,
-    missing_error: []const []const u8,
+    missing_error: []const InternPool.NullTerminatedString,
     /// true if wanted is var args
     fn_var_args: bool,
     /// true if wanted is generic
@@ -26567,7 +26557,8 @@ const InMemoryCoercionResult = union(enum) {
                 break;
             },
             .missing_error => |missing_errors| {
-                for (missing_errors) |err| {
+                for (missing_errors) |err_index| {
+                    const err = mod.intern_pool.stringToSlice(err_index);
                     try sema.errNote(block, src, msg, "'error.{s}' not a member of destination error set", .{err});
                 }
                 break;
@@ -26813,8 +26804,8 @@ fn coerceInMemoryAllowed(
 
     // Error Unions
     if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
-        const dest_payload = dest_ty.errorUnionPayload();
-        const src_payload = src_ty.errorUnionPayload();
+        const dest_payload = dest_ty.errorUnionPayload(mod);
+        const src_payload = src_ty.errorUnionPayload(mod);
         const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src);
         if (child != .ok) {
             return InMemoryCoercionResult{ .error_union_payload = .{
@@ -26823,7 +26814,7 @@ fn coerceInMemoryAllowed(
                 .wanted = dest_payload,
             } };
         }
-        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target, dest_src, src_src);
+        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src);
     }
 
     // Error Sets
@@ -26903,8 +26894,8 @@ fn coerceInMemoryAllowed(
         if (child != .ok) {
             return InMemoryCoercionResult{ .optional_child = .{
                 .child = try child.dupe(sema.arena),
-                .actual = try src_child_type.copy(sema.arena),
-                .wanted = try dest_child_type.copy(sema.arena),
+                .actual = src_child_type,
+                .wanted = dest_child_type,
             } };
        }
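After interning, every error set the coercion code can encounter reduces to one of two InternPool keys, which is what lets the four legacy tags in the next hunk collapse. A sketch of the two shapes (assuming `set_ty` is an error set other than `anyerror`):

    switch (ip.indexToKey(set_ty.ip_index)) {
        .error_set_type => |error_set_type| {
            for (error_set_type.names) |name| {
                _ = name; // each is an InternPool.NullTerminatedString
            }
        },
        .inferred_error_set_type => |ies_index| {
            _ = mod.inferredErrorSetPtr(ies_index); // resolved lazily on demand
        },
        else => unreachable,
    }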
@@ -26926,133 +26917,100 @@ fn coerceInMemoryAllowedErrorSets(
     src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
     const mod = sema.mod;
+    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
 
     // Coercion to `anyerror`. Note that this check can return false negatives
     // in case the error sets did not get resolved.
-    if (dest_ty.isAnyError()) {
+    if (dest_ty.isAnyError(mod)) {
         return .ok;
     }
 
-    if (dest_ty.castTag(.error_set_inferred)) |dst_payload| {
-        const dst_ies = dst_payload.data;
+    if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| {
+        const dst_ies = mod.inferredErrorSetPtr(dst_ies_index);
         // We will make an effort to return `ok` without resolving either error set, to
         // avoid unnecessary "unable to resolve error set" dependency loop errors.
         switch (src_ty.ip_index) {
-            .none => switch (src_ty.tag()) {
-                .error_set_inferred => {
+            .anyerror_type => {},
+            else => switch (ip.indexToKey(src_ty.ip_index)) {
+                .inferred_error_set_type => |src_index| {
                     // If both are inferred error sets of functions, and
                     // the dest includes the source function, the coercion is OK.
                     // This check is important because it works without forcing a full resolution
                     // of inferred error sets.
-                    const src_ies = src_ty.castTag(.error_set_inferred).?.data;
-
-                    if (dst_ies.inferred_error_sets.contains(src_ies)) {
+                    if (dst_ies.inferred_error_sets.contains(src_index)) {
                         return .ok;
                     }
                 },
-                .error_set_single => {
-                    const name = src_ty.castTag(.error_set_single).?.data;
-                    if (dst_ies.errors.contains(name)) return .ok;
-                },
-                .error_set_merged => {
-                    const names = src_ty.castTag(.error_set_merged).?.data.keys();
-                    for (names) |name| {
-                        if (!dst_ies.errors.contains(name)) break;
-                    } else return .ok;
-                },
-                .error_set => {
-                    const names = src_ty.castTag(.error_set).?.data.names.keys();
-                    for (names) |name| {
+                .error_set_type => |error_set_type| {
+                    for (error_set_type.names) |name| {
                         if (!dst_ies.errors.contains(name)) break;
                     } else return .ok;
                 },
                 else => unreachable,
             },
-            .anyerror_type => {},
-            else => switch (mod.intern_pool.indexToKey(src_ty.ip_index)) {
-                else => @panic("TODO"),
-            },
         }
 
         if (dst_ies.func == sema.owner_func) {
             // We are trying to coerce an error set to the current function's
             // inferred error set.
-            try dst_ies.addErrorSet(sema.gpa, src_ty);
+            try dst_ies.addErrorSet(src_ty, ip, gpa);
             return .ok;
         }
 
-        try sema.resolveInferredErrorSet(block, dest_src, dst_payload.data);
+        try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index);
         // isAnyError might have changed from a false negative to a true positive after resolution.
-        if (dest_ty.isAnyError()) {
+        if (dest_ty.isAnyError(mod)) {
            return .ok;
        }
    }
 
-    var missing_error_buf = std.ArrayList([]const u8).init(sema.gpa);
+    var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa);
     defer missing_error_buf.deinit();
 
     switch (src_ty.ip_index) {
-        .none => switch (src_ty.tag()) {
-            .error_set_inferred => {
-                const src_data = src_ty.castTag(.error_set_inferred).?.data;
+        .anyerror_type => switch (ip.indexToKey(dest_ty.ip_index)) {
+            .inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above.
+            .simple_type => unreachable, // filtered out above
+            .error_set_type => return .from_anyerror,
+            else => unreachable,
+        },
+
+        else => switch (ip.indexToKey(src_ty.ip_index)) {
+            .inferred_error_set_type => |src_index| {
+                const src_data = mod.inferredErrorSetPtr(src_index);
 
-                try sema.resolveInferredErrorSet(block, src_src, src_data);
+                try sema.resolveInferredErrorSet(block, src_src, src_index);
 
                 // src anyerror status might have changed after the resolution.
-                if (src_ty.isAnyError()) {
-                    // dest_ty.isAnyError() == true is already checked for at this point.
+                if (src_ty.isAnyError(mod)) {
+                    // dest_ty.isAnyError(mod) == true is already checked for at this point.
                     return .from_anyerror;
                 }
 
                 for (src_data.errors.keys()) |key| {
-                    if (!dest_ty.errorSetHasField(key)) {
+                    if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) {
                         try missing_error_buf.append(key);
                     }
                 }
 
                 if (missing_error_buf.items.len != 0) {
                     return InMemoryCoercionResult{
-                        .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items),
-                    };
-                }
-
-                return .ok;
-            },
-            .error_set_single => {
-                const name = src_ty.castTag(.error_set_single).?.data;
-                if (dest_ty.errorSetHasField(name)) {
-                    return .ok;
-                }
-                const list = try sema.arena.alloc([]const u8, 1);
-                list[0] = name;
-                return InMemoryCoercionResult{ .missing_error = list };
-            },
-            .error_set_merged => {
-                const names = src_ty.castTag(.error_set_merged).?.data.keys();
-                for (names) |name| {
-                    if (!dest_ty.errorSetHasField(name)) {
-                        try missing_error_buf.append(name);
-                    }
-                }
-
-                if (missing_error_buf.items.len != 0) {
-                    return InMemoryCoercionResult{
-                        .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items),
+                        .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
                     };
                 }
 
                 return .ok;
             },
-            .error_set => {
-                const names = src_ty.castTag(.error_set).?.data.names.keys();
-                for (names) |name| {
-                    if (!dest_ty.errorSetHasField(name)) {
+            .error_set_type => |error_set_type| {
+                for (error_set_type.names) |name| {
+                    if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) {
                         try missing_error_buf.append(name);
                     }
                 }
 
                 if (missing_error_buf.items.len != 0) {
                     return InMemoryCoercionResult{
-                        .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items),
+                        .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
                     };
                 }
 
                 return .ok;
             },
             else => unreachable,
         },
-
-        .anyerror_type => switch (dest_ty.ip_index) {
-            .none => switch (dest_ty.tag()) {
-                .error_set_inferred => unreachable, // Caught by dest_ty.isAnyError() above.
-                .error_set_single, .error_set_merged, .error_set => return .from_anyerror,
-                else => unreachable,
-            },
-            .anyerror_type => unreachable, // Filtered out above.
-            else => @panic("TODO"),
-        },
-
-        else => @panic("TODO"),
     }
 
     unreachable;
 }
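Inferred error sets are now referenced by a stable `Module.Fn.InferredErrorSet.Index` and dereferenced on demand, which is what allows the hunk above to compare two sets by index (`dst_ies.inferred_error_sets.contains(src_index)`) without resolving either. The access pattern, sketched:

    if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| {
        const ies = mod.inferredErrorSetPtr(ies_index);
        _ = ies; // mutable set data lives in the pool, not behind a Type payload
    }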
@@ -28029,7 +27975,7 @@ fn beginComptimePtrMutation(
             var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty);
             switch (parent.pointee) {
                 .direct => |val_ptr| {
-                    const payload_ty = parent.ty.errorUnionPayload();
+                    const payload_ty = parent.ty.errorUnionPayload(mod);
                     if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) {
                         return ComptimePtrMutationKit{
                             .decl_ref_mut = parent.decl_ref_mut,
@@ -28402,7 +28348,7 @@ fn beginComptimePtrLoad(
         => blk: {
             const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
             const payload_ty = switch (ptr_val.tag()) {
-                .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(),
+                .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(mod),
                 .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod),
                 else => unreachable,
             };
@@ -29301,7 +29247,7 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value {
     var anon_decl = try block.startAnonDecl();
     defer anon_decl.deinit();
     const decl = try anon_decl.finish(
-        try ty.copy(anon_decl.arena()),
+        ty,
         try val.copy(anon_decl.arena()),
         0, // default alignment
     );
@@ -29387,7 +29333,7 @@ fn analyzeRef(
         var anon_decl = try block.startAnonDecl();
         defer anon_decl.deinit();
         return sema.analyzeDeclRef(try anon_decl.finish(
-            try operand_ty.copy(anon_decl.arena()),
+            operand_ty,
             try val.copy(anon_decl.arena()),
             0, // default alignment
         ));
@@ -29555,7 +29501,7 @@ fn analyzeIsNonErrComptimeOnly(
     if (ot == .ErrorSet) return Air.Inst.Ref.bool_false;
     assert(ot == .ErrorUnion);
 
-    const payload_ty = operand_ty.errorUnionPayload();
+    const payload_ty = operand_ty.errorUnionPayload(mod);
     if (payload_ty.zigTypeTag(mod) == .NoReturn) {
         return Air.Inst.Ref.bool_false;
     }
@@ -29577,23 +29523,28 @@ fn analyzeIsNonErrComptimeOnly(
     // exception if the error union error set is known to be empty,
     // we allow the comparison but always make it comptime-known.
 
-    const set_ty = operand_ty.errorUnionSet();
+    const set_ty = operand_ty.errorUnionSet(mod);
     switch (set_ty.ip_index) {
-        .none => switch (set_ty.tag()) {
-            .error_set_inferred => blk: {
+        .anyerror_type => {},
+        else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) {
+            .error_set_type => |error_set_type| {
+                if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true;
+            },
+            .inferred_error_set_type => |ies_index| blk: {
                 // If the error set is empty, we must return a comptime true or false.
                 // However we want to avoid unnecessarily resolving an inferred error set
                 // in case it is already non-empty.
-                const ies = set_ty.castTag(.error_set_inferred).?.data;
+                const ies = mod.inferredErrorSetPtr(ies_index);
                 if (ies.is_anyerror) break :blk;
                 if (ies.errors.count() != 0) break :blk;
                 if (maybe_operand_val == null) {
                     // Try to avoid resolving inferred error set if possible.
                     if (ies.errors.count() != 0) break :blk;
                     if (ies.is_anyerror) break :blk;
-                    for (ies.inferred_error_sets.keys()) |other_ies| {
-                        if (ies == other_ies) continue;
-                        try sema.resolveInferredErrorSet(block, src, other_ies);
+                    for (ies.inferred_error_sets.keys()) |other_ies_index| {
+                        if (ies_index == other_ies_index) continue;
+                        try sema.resolveInferredErrorSet(block, src, other_ies_index);
+                        const other_ies = mod.inferredErrorSetPtr(other_ies_index);
                         if (other_ies.is_anyerror) {
                             ies.is_anyerror = true;
                             ies.is_resolved = true;
@@ -29608,18 +29559,12 @@ fn analyzeIsNonErrComptimeOnly(
                     // so far with this type can't contain errors either.
                     return Air.Inst.Ref.bool_true;
                 }
-                try sema.resolveInferredErrorSet(block, src, ies);
+                try sema.resolveInferredErrorSet(block, src, ies_index);
                 if (ies.is_anyerror) break :blk;
                 if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true;
             }
         },
-            else => if (set_ty.errorSetNames().len == 0) return Air.Inst.Ref.bool_true,
-        },
-
-        .anyerror_type => {},
-
-        else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) {
-            else => @panic("TODO"),
+            else => unreachable,
         },
     }
@@ -30516,7 +30461,8 @@ fn wrapErrorUnionPayload(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
-    const dest_payload_ty = dest_ty.errorUnionPayload();
+    const mod = sema.mod;
+    const dest_payload_ty = dest_ty.errorUnionPayload(mod);
     const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false });
     if (try sema.resolveMaybeUndefVal(coerced)) |val| {
         return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val));
@@ -30533,51 +30479,41 @@ fn wrapErrorUnionSet(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const inst_ty = sema.typeOf(inst);
-    const dest_err_set_ty = dest_ty.errorUnionSet();
+    const dest_err_set_ty = dest_ty.errorUnionSet(mod);
     if (try sema.resolveMaybeUndefVal(inst)) |val| {
         switch (dest_err_set_ty.ip_index) {
             .anyerror_type => {},
-
-            .none => switch (dest_err_set_ty.tag()) {
-                .error_set_single => ok: {
-                    const expected_name = val.castTag(.@"error").?.data.name;
-                    const n = dest_err_set_ty.castTag(.error_set_single).?.data;
-                    if (mem.eql(u8, expected_name, n)) break :ok;
-                    return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
-                },
-                .error_set => {
+            else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) {
+                .error_set_type => |error_set_type| ok: {
                     const expected_name = val.castTag(.@"error").?.data.name;
-                    const error_set = dest_err_set_ty.castTag(.error_set).?.data;
-                    if (!error_set.names.contains(expected_name)) {
-                        return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
+                    if (ip.getString(expected_name).unwrap()) |expected_name_interned| {
+                        if (error_set_type.nameIndex(ip, expected_name_interned) != null)
+                            break :ok;
                     }
+                    return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
                 },
-                .error_set_inferred => ok: {
+                .inferred_error_set_type => |ies_index| ok: {
+                    const ies = mod.inferredErrorSetPtr(ies_index);
                     const expected_name = val.castTag(.@"error").?.data.name;
-                    const ies = dest_err_set_ty.castTag(.error_set_inferred).?.data;
 
                     // We carefully do this in an order that avoids unnecessarily
                     // resolving the destination error set type.
                     if (ies.is_anyerror) break :ok;
-                    if (ies.errors.contains(expected_name)) break :ok;
+
+                    if (ip.getString(expected_name).unwrap()) |expected_name_interned| {
+                        if (ies.errors.contains(expected_name_interned)) break :ok;
+                    }
+
                     if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
                         break :ok;
                     }
 
                     return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
                 },
-                .error_set_merged => {
-                    const expected_name = val.castTag(.@"error").?.data.name;
-                    const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data;
-                    if (!error_set.contains(expected_name)) {
-                        return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
-                    }
-                },
                 else => unreachable,
             },
-
-            else => @panic("TODO"),
         }
         return sema.addConstant(dest_ty, val);
     }
@@ -30743,11 +30679,11 @@ fn resolvePeerTypes(
                         continue;
                     }
 
-                    err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty);
+                    err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty);
                     continue;
                 },
                 .ErrorUnion => {
-                    const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet();
+                    const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod);
 
                     if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) {
                         continue;
@@ -30757,7 +30693,7 @@ fn resolvePeerTypes(
                         continue;
                     }
 
-                    err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty);
+                    err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty);
                     continue;
                 },
                 else => {
@@ -30770,7 +30706,7 @@ fn resolvePeerTypes(
                         continue;
                     }
 
-                    err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty);
+                    err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty);
                     continue;
                 } else {
                     err_set_ty = candidate_ty;
@@ -30781,14 +30717,14 @@ fn resolvePeerTypes(
             .ErrorUnion => switch (chosen_ty_tag) {
                 .ErrorSet => {
                     const chosen_set_ty = err_set_ty orelse chosen_ty;
-                    const candidate_set_ty = candidate_ty.errorUnionSet();
+                    const candidate_set_ty = candidate_ty.errorUnionSet(mod);
 
                     if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) {
                         err_set_ty = chosen_set_ty;
                     } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) {
                         err_set_ty = null;
                     } else {
-                        err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty);
+                        err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty);
                     }
                     chosen = candidate;
                     chosen_i = candidate_i + 1;
@@ -30796,8 +30732,8 @@ fn resolvePeerTypes(
                 },
 
                 .ErrorUnion => {
-                    const chosen_payload_ty = chosen_ty.errorUnionPayload();
-                    const candidate_payload_ty = candidate_ty.errorUnionPayload();
+                    const chosen_payload_ty = chosen_ty.errorUnionPayload(mod);
+                    const candidate_payload_ty = candidate_ty.errorUnionPayload(mod);
 
                     const coerce_chosen = (try sema.coerceInMemoryAllowed(block, chosen_payload_ty, candidate_payload_ty, false, target, src, src)) == .ok;
                     const coerce_candidate = (try sema.coerceInMemoryAllowed(block, candidate_payload_ty, chosen_payload_ty, false, target, src, src)) == .ok;
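wrapErrorUnionSet checks membership without interning the candidate name first: `ip.getString` succeeds only for strings that are already in the pool, and a name that was never interned cannot be a member of an interned error set. That fast path, sketched:

    if (ip.getString(expected_name).unwrap()) |expected_name_interned| {
        if (error_set_type.nameIndex(ip, expected_name_interned) != null) {
            // member confirmed without mutating the pool
        }
    }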
@@ -30811,15 +30747,15 @@ fn resolvePeerTypes(
                         chosen_i = candidate_i + 1;
                     }
 
-                    const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet();
-                    const candidate_set_ty = candidate_ty.errorUnionSet();
+                    const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod);
+                    const candidate_set_ty = candidate_ty.errorUnionSet(mod);
 
                     if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) {
                         err_set_ty = chosen_set_ty;
                     } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) {
                         err_set_ty = candidate_set_ty;
                     } else {
-                        err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty);
+                        err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty);
                     }
                     continue;
                 }
@@ -30827,13 +30763,13 @@ fn resolvePeerTypes(
 
                 else => {
                     if (err_set_ty) |chosen_set_ty| {
-                        const candidate_set_ty = candidate_ty.errorUnionSet();
+                        const candidate_set_ty = candidate_ty.errorUnionSet(mod);
                         if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) {
                             err_set_ty = chosen_set_ty;
                         } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) {
                             err_set_ty = null;
                         } else {
-                            err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty);
+                            err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty);
                        }
                    }
                    seen_const = seen_const or chosen_ty.isConstPtr(mod);
@@ -30963,7 +30899,7 @@ fn resolvePeerTypes(
                 }
             },
             .ErrorUnion => {
-                const chosen_ptr_ty = chosen_ty.errorUnionPayload();
+                const chosen_ptr_ty = chosen_ty.errorUnionPayload(mod);
                 if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) {
                     const chosen_info = chosen_ptr_ty.ptrInfo(mod);
@@ -31073,7 +31009,7 @@ fn resolvePeerTypes(
                 }
             },
             .ErrorUnion => {
-                const payload_ty = chosen_ty.errorUnionPayload();
+                const payload_ty = chosen_ty.errorUnionPayload(mod);
                 if ((try sema.coerceInMemoryAllowed(block, payload_ty, candidate_ty, false, target, src, src)) == .ok) {
                     continue;
                 }
@@ -31090,7 +31026,7 @@ fn resolvePeerTypes(
                     continue;
                 }
 
-                err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, chosen_ty);
+                err_set_ty = try sema.errorSetMerge(chosen_set_ty, chosen_ty);
                 continue;
             } else {
                 err_set_ty = chosen_ty;
@@ -31148,14 +31084,14 @@ fn resolvePeerTypes(
         else
             new_ptr_ty;
         const set_ty = err_set_ty orelse return opt_ptr_ty;
-        return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod);
+        return try mod.errorUnionType(set_ty, opt_ptr_ty);
     }
 
     if (seen_const) {
         // turn []T => []const T
         switch (chosen_ty.zigTypeTag(mod)) {
             .ErrorUnion => {
-                const ptr_ty = chosen_ty.errorUnionPayload();
+                const ptr_ty = chosen_ty.errorUnionPayload(mod);
                 var info = ptr_ty.ptrInfo(mod);
                 info.mutable = false;
                 const new_ptr_ty = try Type.ptr(sema.arena, mod, info);
@@ -31163,8 +31099,8 @@ fn resolvePeerTypes(
                     try Type.optional(sema.arena, new_ptr_ty, mod)
                 else
                     new_ptr_ty;
-                const set_ty = err_set_ty orelse chosen_ty.errorUnionSet();
-                return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod);
+                const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod);
+                return try mod.errorUnionType(set_ty, opt_ptr_ty);
             },
             .Pointer => {
                 var info = chosen_ty.ptrInfo(mod);
@@ -31175,7 +31111,7 @@ fn resolvePeerTypes(
                 else
                     new_ptr_ty;
                 const set_ty = err_set_ty orelse return opt_ptr_ty;
-                return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod);
+                return try mod.errorUnionType(set_ty, opt_ptr_ty);
             },
             else => return chosen_ty,
         }
@@ -31187,16 +31123,16 @@ fn resolvePeerTypes(
             else => try Type.optional(sema.arena, chosen_ty, mod),
         };
         const set_ty = err_set_ty orelse return opt_ty;
-        return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod);
+        return try mod.errorUnionType(set_ty, opt_ty);
     }
 
     if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) {
         .ErrorSet => return ty,
         .ErrorUnion => {
-            const payload_ty = chosen_ty.errorUnionPayload();
-            return try Type.errorUnion(sema.arena, ty, payload_ty, mod);
+            const payload_ty = chosen_ty.errorUnionPayload(mod);
+            return try mod.errorUnionType(ty, payload_ty);
         },
-        else => return try Type.errorUnion(sema.arena, ty, chosen_ty, mod),
+        else => return try mod.errorUnionType(ty, chosen_ty),
     };
 
     return chosen_ty;
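Merging likewise moves off the arena: `lhs_ty.errorSetMerge(sema.arena, rhs_ty)` becomes `sema.errorSetMerge(lhs_ty, rhs_ty)` with an interned result, and `Type.errorUnion(sema.arena, set, payload, mod)` becomes `mod.errorUnionType(set, payload)`. The combined shape used repeatedly in resolvePeerTypes above, as a sketch:

    const merged_set = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty);
    const result_ty = try mod.errorUnionType(merged_set, payload_ty);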
chosen_ty.errorUnionPayload(mod); + return try mod.errorUnionType(ty, payload_ty); }, - else => return try Type.errorUnion(sema.arena, ty, chosen_ty, mod), + else => return try mod.errorUnionType(ty, chosen_ty), }; return chosen_ty; @@ -31279,7 +31215,7 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeLayout(payload_ty); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); return sema.resolveTypeLayout(payload_ty); }, .Fn => { @@ -31465,7 +31401,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum); - struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator); + struct_obj.backing_int_ty = backing_int_ty; try wip_captures.finalize(); } else { if (fields_bit_sum > std.math.maxInt(u16)) { @@ -31605,18 +31541,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return switch (ty.ip_index) { .empty_struct_type => false, - .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => false, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, .ptr_type => |ptr_type| { @@ -31635,6 +31559,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), + .error_set_type, .inferred_error_set_type => false, + .func_type => true, .simple_type => |t| switch (t) { @@ -31780,7 +31706,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { .Optional => { return sema.resolveTypeFully(ty.optionalChild(mod)); }, - .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), + .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)), .Fn => { const info = mod.typeToFunc(ty).?; if (info.is_generic) { @@ -32048,16 +31974,17 @@ fn resolveInferredErrorSet( sema: *Sema, block: *Block, src: LazySrcLoc, - ies: *Module.Fn.InferredErrorSet, + ies_index: Module.Fn.InferredErrorSet.Index, ) CompileError!void { + const mod = sema.mod; + const ies = mod.inferredErrorSetPtr(ies_index); + if (ies.is_resolved) return; if (ies.func.state == .in_progress) { return sema.fail(block, src, "unable to resolve inferred error set", .{}); } - const mod = sema.mod; - // In order to ensure that all dependencies are properly added to the set, we // need to ensure the function body is analyzed of the inferred error set. // However, in the case of comptime/inline function calls with inferred error sets, @@ -32072,7 +31999,7 @@ fn resolveInferredErrorSet( // so here we can simply skip this case. if (ies_func_info.return_type == .generic_poison_type) { assert(ies_func_info.cc == .Inline); - } else if (ies_func_info.return_type.toType().errorUnionSet().castTag(.error_set_inferred).?.data == ies) { + } else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? 
== ies) { if (ies_func_info.is_generic) { const msg = msg: { const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); @@ -32090,10 +32017,11 @@ fn resolveInferredErrorSet( ies.is_resolved = true; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (ies_index == other_ies_index) continue; + try sema.resolveInferredErrorSet(block, src, other_ies_index); + const other_ies = mod.inferredErrorSetPtr(other_ies_index); for (other_ies.errors.keys()) |key| { try ies.errors.put(sema.gpa, key, {}); } @@ -32108,8 +32036,9 @@ fn resolveInferredErrorSetTy( src: LazySrcLoc, ty: Type, ) CompileError!void { - if (ty.castTag(.error_set_inferred)) |inferred| { - try sema.resolveInferredErrorSet(block, src, inferred.data); + const mod = sema.mod; + if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); } } @@ -32333,7 +32262,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } const field = &struct_obj.fields.values()[field_i]; - field.ty = try field_ty.copy(decl_arena_allocator); + field.ty = field_ty; if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { @@ -32809,7 +32738,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } gop.value_ptr.* = .{ - .ty = try field_ty.copy(decl_arena_allocator), + .ty = field_ty, .abi_align = 0, }; @@ -33038,13 +32967,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .empty_struct_type => return Value.empty_struct, .none => switch (ty.tag()) { - .error_set_single, - .error_set, - .error_set_merged, - .error_union, - .error_set_inferred, - => return null, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -33062,6 +32984,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_union_type, .func_type, .anyframe_type, + .error_set_type, + .inferred_error_set_type, => null, .array_type => |array_type| { @@ -33389,7 +33313,7 @@ fn analyzeComptimeAlloc( defer anon_decl.deinit(); const decl_index = try anon_decl.finish( - try var_type.copy(anon_decl.arena()), + var_type, // There will be stores before the first load, but they may be to sub-elements or // sub-fields. So we need to initialize with undef to allow the mechanism to expand // into fields/elements and have those overridden with stored values. 
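
The resolveInferredErrorSet hunks above replace stored *Module.Fn.InferredErrorSet pointers with pool indices that are re-resolved through the Module on each access. Below is a minimal standalone sketch of that handle pattern, in the same 0.11-era Zig as the patch; the Pool, Item, and Index names are hypothetical and do not appear in the compiler:

const std = @import("std");

const Pool = struct {
    items: std.ArrayListUnmanaged(Item) = .{},

    const Item = struct { is_resolved: bool };
    /// A stable handle: callers store this instead of an *Item.
    const Index = enum(u32) { _ };

    fn add(pool: *Pool, gpa: std.mem.Allocator, item: Item) !Index {
        try pool.items.append(gpa, item);
        return @intToEnum(Index, @intCast(u32, pool.items.items.len - 1));
    }

    /// Pointers are derived on demand (compare mod.inferredErrorSetPtr above)
    /// and must not be held across operations that may grow `items`.
    fn ptr(pool: *Pool, index: Index) *Item {
        return &pool.items.items[@enumToInt(index)];
    }
};

Because callers hold only an Index, the backing array is free to grow and relocate, which is what lets this data migrate into InternPool-style storage one piece at a time.
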
@@ -33600,8 +33524,6 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { switch (ty.tag()) { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - - else => return null, } } @@ -33616,18 +33538,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return switch (ty.ip_index) { .empty_struct_type => false, - .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => false, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return false, .ptr_type => |ptr_type| { @@ -33649,6 +33559,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union_type => |error_union_type| { return sema.typeRequiresComptime(error_union_type.payload_type.toType()); }, + + .error_set_type, .inferred_error_set_type => false, + .func_type => true, .simple_type => |t| return switch (t) { @@ -34410,3 +34323,23 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { .vector_index = vector_info.vector_index, }); } + +/// Merge lhs with rhs. +/// Asserts that lhs and rhs are both error sets and are resolved. +fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { + const mod = sema.mod; + const arena = sema.arena; + const lhs_names = lhs.errorSetNames(mod); + const rhs_names = rhs.errorSetNames(mod); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; + try names.ensureUnusedCapacity(arena, lhs_names.len); + + for (lhs_names) |name| { + names.putAssumeCapacityNoClobber(name, {}); + } + for (rhs_names) |name| { + try names.put(arena, name, {}); + } + + return mod.errorSetFromUnsortedNames(names.keys()); +} diff --git a/src/TypedValue.zig b/src/TypedValue.zig index ced20ac52212..144b7ebf9d48 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -27,13 +27,13 @@ pub const Managed = struct { /// Assumes arena allocation. Does a recursive copy. 
pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue { return TypedValue{ - .ty = try self.ty.copy(arena), + .ty = self.ty, .val = try self.val.copy(arena), }; } pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool { - if (!a.ty.eql(b.ty, mod)) return false; + if (a.ty.ip_index != b.ty.ip_index) return false; return a.val.eql(b.val, a.ty, mod); } @@ -286,7 +286,7 @@ pub fn print( .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), .eu_payload => { val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(); + ty = ty.errorUnionPayload(mod); }, .opt_payload => { val = val.castTag(.opt_payload).?.data; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 8b84189e1803..c9126747da04 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3065,8 +3065,8 @@ fn errUnionErr( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } @@ -3145,8 +3145,8 @@ fn errUnionPayload( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } @@ -3305,8 +3305,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; @@ -3329,8 +3329,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; @@ -4893,7 +4893,7 @@ fn isErr( error_union_ty: Type, ) !MCValue { const mod = self.bin_file.options.module.?; - const error_type = error_union_ty.errorUnionSet(); + const error_type = error_union_ty.errorUnionSet(mod); if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index a6a715c75d08..fa8646be430e 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2042,8 +2042,8 @@ fn errUnionErr( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = 
self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } @@ -2119,8 +2119,8 @@ fn errUnionPayload( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } @@ -2232,8 +2232,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; @@ -2256,8 +2256,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; @@ -4871,7 +4871,7 @@ fn isErr( error_union_ty: Type, ) !MCValue { const mod = self.bin_file.options.module.?; - const error_type = error_union_ty.errorUnionSet(); + const error_type = error_union_ty.errorUnionSet(mod); if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 072d3ed098fe..13f129f87b49 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -2707,12 +2707,12 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.typeOf(ty_op.operand); - const payload_ty = error_union_ty.errorUnionPayload(); + const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - const mod = self.bin_file.options.module.?; if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); @@ -2721,11 +2721,11 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if 
(self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.typeOf(ty_op.operand); - const payload_ty = error_union_ty.errorUnionPayload(); - const mod = self.bin_file.options.module.?; + const payload_ty = error_union_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); @@ -2735,12 +2735,12 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const payload_ty = error_union_ty.errorUnionPayload(); + const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - const mod = self.bin_file.options.module.?; if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); @@ -3529,8 +3529,8 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { const mod = self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return error_union_mcv; } @@ -4168,8 +4168,8 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const mod = self.bin_file.options.module.?; - const error_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); + const error_type = ty.errorUnionSet(mod); + const payload_type = ty.errorUnionPayload(mod); if (!error_type.hasRuntimeBits(mod)) { return MCValue{ .immediate = 0 }; // always false diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a95026484028..2d7e4a858576 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1264,7 +1264,7 @@ fn genFunc(func: *CodeGen) InnerError!void { if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); const last_inst_ty = func.typeOfIndex(inst); - if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) { + if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); } } @@ -1757,7 +1757,7 @@ fn isByRef(ty: Type, mod: *Module) bool { .Int => return ty.intInfo(mod).bits > 64, .Float => return ty.floatBits(target) > 64, .ErrorUnion => { - const pl_ty = ty.errorUnionPayload(); + const pl_ty = ty.errorUnionPayload(mod); if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } @@ -2256,7 +2256,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const result_value = result_value: { if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { break :result_value WValue{ .none = {} }; - } else if (ret_ty.isNoReturn()) { + } else if (ret_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); break :result_value WValue{ .none = {} }; } 
else if (first_param_sret) { @@ -2346,7 +2346,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE const abi_size = ty.abiSize(mod); switch (ty.zigTypeTag(mod)) { .ErrorUnion => { - const pl_ty = ty.errorUnionPayload(); + const pl_ty = ty.errorUnionPayload(mod); if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.anyerror, 0); } @@ -3111,8 +3111,8 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => return WValue{ .imm32 = 0 }, }, .ErrorUnion => { - const error_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); + const error_type = ty.errorUnionSet(mod); + const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. const is_pl = val.errorUnionIsPayload(); @@ -3916,10 +3916,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const err_union_ty = func.typeOf(un_op); - const pl_ty = err_union_ty.errorUnionPayload(); + const pl_ty = err_union_ty.errorUnionPayload(mod); const result = result: { - if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { switch (opcode) { .i32_ne => break :result WValue{ .imm32 = 0 }, .i32_eq => break :result WValue{ .imm32 = 1 }, @@ -3953,7 +3953,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; - const payload_ty = err_ty.errorUnionPayload(); + const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -3981,10 +3981,10 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; - const payload_ty = err_ty.errorUnionPayload(); + const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (err_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { break :result WValue{ .imm32 = 0 }; } @@ -4031,7 +4031,7 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const err_ty = func.air.getRefType(ty_op.ty); - const pl_ty = err_ty.errorUnionPayload(); + const pl_ty = err_ty.errorUnionPayload(mod); const result = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4044,7 +4044,7 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // write 'undefined' to the payload const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(mod)); + const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod)); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -5362,7 +5362,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const ty_op = func.air.instructions.items(.data)[inst].ty_op; const err_set_ty = func.typeOf(ty_op.operand).childType(mod); 
- const payload_ty = err_set_ty.errorUnionPayload(); + const payload_ty = err_set_ty.errorUnionPayload(mod); const operand = try func.resolveInst(ty_op.operand); // set error-tag to '0' to annotate error union is non-error @@ -6177,10 +6177,10 @@ fn lowerTry( return func.fail("TODO: lowerTry for pointers", .{}); } - const pl_ty = err_union_ty.errorUnionPayload(); + const pl_ty = err_union_ty.errorUnionPayload(mod); const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { // Block we can jump out of when error is not set try func.startBlock(.block, wasm.block_empty); @@ -6742,7 +6742,7 @@ fn callIntrinsic( if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { return WValue.none; - } else if (return_type.isNoReturn()) { + } else if (return_type.isNoReturn(mod)) { try func.addTag(.@"unreachable"); return WValue.none; } else if (want_sret_param) { @@ -6941,20 +6941,21 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { } fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const error_set_ty = func.air.getRefType(ty_op.ty); const result = try func.allocLocal(Type.bool); - const names = error_set_ty.errorSetNames(); + const names = error_set_ty.errorSetNames(mod); var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len); defer values.deinit(); - const mod = func.bin_file.base.options.module.?; var lowest: ?u32 = null; var highest: ?u32 = null; - for (names) |name| { + for (names) |name_ip| { + const name = mod.intern_pool.stringToSlice(name_ip); const err_int = mod.global_error_set.get(name).?; if (lowest) |*l| { if (err_int < l.*) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e83644269f68..77b4e6d42509 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3612,8 +3612,8 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_union_ty = self.typeOf(ty_op.operand); - const err_ty = err_union_ty.errorUnionSet(); - const payload_ty = err_union_ty.errorUnionPayload(); + const err_ty = err_union_ty.errorUnionSet(mod); + const payload_ty = err_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { @@ -3671,7 +3671,7 @@ fn genUnwrapErrorUnionPayloadMir( err_union: MCValue, ) !MCValue { const mod = self.bin_file.options.module.?; - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const result: MCValue = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; @@ -3731,8 +3731,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(dst_lock); const eu_ty = src_ty.childType(mod); - const pl_ty = eu_ty.errorUnionPayload(); - const err_ty = eu_ty.errorUnionSet(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmRegisterMemory( @@ -3771,7 +3771,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: 
Air.Inst.Index) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const eu_ty = src_ty.childType(mod); - const pl_ty = eu_ty.errorUnionPayload(); + const pl_ty = eu_ty.errorUnionPayload(mod); const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( @@ -3797,8 +3797,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(src_lock); const eu_ty = src_ty.childType(mod); - const pl_ty = eu_ty.errorUnionPayload(); - const err_ty = eu_ty.errorUnionSet(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmMemoryImmediate( @@ -3901,8 +3901,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); - const pl_ty = eu_ty.errorUnionPayload(); - const err_ty = eu_ty.errorUnionSet(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { @@ -3924,8 +3924,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); - const pl_ty = eu_ty.errorUnionPayload(); - const err_ty = eu_ty.errorUnionSet(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); const result: MCValue = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); @@ -8782,7 +8782,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { const mod = self.bin_file.options.module.?; - const err_type = ty.errorUnionSet(); + const err_type = ty.errorUnionSet(mod); if (err_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false @@ -8793,7 +8793,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! 
self.eflags_inst = inst; } - const err_off = errUnionErrorOffset(ty.errorUnionPayload(), mod); + const err_off = errUnionErrorOffset(ty.errorUnionPayload(mod), mod); switch (operand) { .register => |reg| { const eu_lock = self.register_manager.lockReg(reg); diff --git a/src/codegen.zig b/src/codegen.zig index 8e145a3b32c6..775eb09ab048 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -139,7 +139,7 @@ pub fn generateLazySymbol( return generateLazyFunction(bin_file, src_loc, lazy_sym, code, debug_output); } - if (lazy_sym.ty.isAnyError()) { + if (lazy_sym.ty.isAnyError(mod)) { alignment.* = 4; const err_names = mod.error_name_list.items; mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); @@ -670,8 +670,8 @@ pub fn generateSymbol( return Result.ok; }, .ErrorUnion => { - const error_ty = typed_value.ty.errorUnionSet(); - const payload_ty = typed_value.ty.errorUnionPayload(); + const error_ty = typed_value.ty.errorUnionSet(mod); + const payload_ty = typed_value.ty.errorUnionPayload(mod); const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -894,7 +894,7 @@ fn lowerParentPtr( }, .eu_payload_ptr => { const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data; - const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(); + const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); return lowerParentPtr( bin_file, src_loc, @@ -1249,8 +1249,8 @@ pub fn genTypedValue( } }, .ErrorUnion => { - const error_type = typed_value.ty.errorUnionSet(); - const payload_type = typed_value.ty.errorUnionPayload(); + const error_type = typed_value.ty.errorUnionSet(mod); + const payload_type = typed_value.ty.errorUnionPayload(mod); const is_pl = typed_value.val.errorUnionIsPayload(); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c2a108d68ed0..c9cc48590381 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -465,7 +465,7 @@ pub const Function = struct { }), }, .data = switch (key) { - .tag_name => .{ .tag_name = try data.tag_name.copy(arena) }, + .tag_name => .{ .tag_name = data.tag_name }, .never_tail => .{ .never_tail = data.never_tail }, .never_inline => .{ .never_inline = data.never_inline }, }, @@ -862,8 +862,8 @@ pub const DeclGen = struct { return writer.writeByte('}'); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - const error_ty = ty.errorUnionSet(); + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, val, location); @@ -1252,8 +1252,8 @@ pub const DeclGen = struct { } }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - const error_ty = ty.errorUnionSet(); + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4252,6 +4252,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { } fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Block, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; @@ -4284,7 +4285,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.indent_writer.insertNewline(); // 
noreturn blocks have no `br` instructions reaching them, so we don't want a label - if (!f.typeOfIndex(inst).isNoReturn()) { + if (!f.typeOfIndex(inst).isNoReturn(mod)) { // label must be followed by an expression, include an empty one. try writer.print("zig_block_{d}:;\n", .{block_id}); } @@ -4322,10 +4323,10 @@ fn lowerTry( const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try writer.writeAll("if ("); if (!payload_has_bits) { if (is_ptr) @@ -5500,8 +5501,8 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const local = try f.allocLocal(inst, inst_ty); if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) { @@ -5539,7 +5540,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const writer = f.object.writer(); - if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) { + if (!error_union_ty.errorUnionPayload(mod).hasRuntimeBits(mod)) { if (!is_ptr) return .none; const local = try f.allocLocal(inst, inst_ty); @@ -5601,9 +5602,9 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.typeOfIndex(inst); - const payload_ty = inst_ty.errorUnionPayload(); + const payload_ty = inst_ty.errorUnionPayload(mod); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); - const err_ty = inst_ty.errorUnionSet(); + const err_ty = inst_ty.errorUnionSet(mod); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5642,8 +5643,8 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); const error_union_ty = f.typeOf(ty_op.operand).childType(mod); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); // First, set the non-error value. 
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -5691,10 +5692,10 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.typeOfIndex(inst); - const payload_ty = inst_ty.errorUnionPayload(); + const payload_ty = inst_ty.errorUnionPayload(mod); const payload = try f.resolveInst(ty_op.operand); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); - const err_ty = inst_ty.errorUnionSet(); + const err_ty = inst_ty.errorUnionSet(mod); try reap(f, inst, &.{ty_op.operand}); const writer = f.object.writer(); @@ -5729,8 +5730,8 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const const operand_ty = f.typeOf(un_op); const local = try f.allocLocal(inst, Type.bool); const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; - const payload_ty = err_union_ty.errorUnionPayload(); - const error_ty = err_union_ty.errorUnionSet(); + const payload_ty = err_union_ty.errorUnionPayload(mod); + const error_ty = err_union_ty.errorUnionSet(mod); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 70426972af6a..dc1749d42e0e 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1680,14 +1680,14 @@ pub const CType = extern union { .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), .payload => unreachable, }) |fwd_idx| { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); if (try lookup.typeToIndex(payload_ty, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter => .complete, .global => .global, .payload => unreachable, })) |payload_idx| { - const error_ty = ty.errorUnionSet(); + const error_ty = ty.errorUnionSet(mod); if (payload_idx == Tag.void.toIndex()) { try self.initType(error_ty, kind, lookup); } else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0c503edee404..1da3d91b13d1 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -362,15 +362,11 @@ pub const Object = struct { decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction. named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), - /// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of - /// the compiler, but the Type/Value memory here is backed by `type_map_arena`. - /// TODO we need to remove entries from this map in response to incremental compilation - /// but I think the frontend won't tell us about types that get deleted because - /// hasRuntimeBits() is false for types. + /// Maps Zig types to LLVM types. The table memory is backed by the GPA of + /// the compiler. + /// TODO when InternPool garbage collection is implemented, this map needs + /// to be garbage collected as well. type_map: TypeMap, - /// The backing memory for `type_map`. Periodically garbage collected after flush(). - /// The code for doing the periodical GC is not yet implemented. - type_map_arena: std.heap.ArenaAllocator, di_type_map: DITypeMap, /// The LLVM global table which holds the names corresponding to Zig errors. /// Note that the values are not added until flushModule, when all errors in @@ -381,12 +377,7 @@ pub const Object = struct { /// name collision. 
extern_collisions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void), - pub const TypeMap = std.HashMapUnmanaged( - Type, - *llvm.Type, - Type.HashContext64, - std.hash_map.default_max_load_percentage, - ); + pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, *llvm.Type); /// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we /// want to iterate over it while adding entries to it. @@ -543,7 +534,6 @@ pub const Object = struct { .decl_map = .{}, .named_enum_map = .{}, .type_map = .{}, - .type_map_arena = std.heap.ArenaAllocator.init(gpa), .di_type_map = .{}, .error_name_table = null, .extern_collisions = .{}, @@ -563,7 +553,6 @@ pub const Object = struct { self.decl_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); - self.type_map_arena.deinit(); self.extern_collisions.deinit(gpa); self.* = undefined; } @@ -1462,9 +1451,6 @@ pub const Object = struct { return o.lowerDebugTypeImpl(entry, resolve, di_type); } errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module })); - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator()); const entry: Object.DITypeMap.Entry = .{ .key_ptr = gop.key_ptr, .value_ptr = gop.value_ptr, @@ -1868,7 +1854,7 @@ pub const Object = struct { return full_di_ty; }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. @@ -2823,7 +2809,7 @@ pub const DeclGen = struct { .Opaque => { if (t.ip_index == .anyopaque_type) return dg.context.intType(8); - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; const opaque_type = mod.intern_pool.indexToKey(t.ip_index).opaque_type; @@ -2869,7 +2855,7 @@ pub const DeclGen = struct { return dg.context.structType(&fields_buf, 3, .False); }, .ErrorUnion => { - const payload_ty = t.errorUnionPayload(); + const payload_ty = t.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try dg.lowerType(Type.anyerror); } @@ -2913,13 +2899,9 @@ pub const DeclGen = struct { }, .ErrorSet => return dg.context.intType(16), .Struct => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) { .anon_struct_type => |tuple| { const llvm_struct_ty = dg.context.structCreateNamed(""); @@ -3041,13 +3023,9 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Union => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. 
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const layout = t.unionGetLayout(mod); const union_obj = mod.typeToUnion(t).?; @@ -3571,7 +3549,7 @@ pub const DeclGen = struct { } }, .ErrorUnion => { - const payload_type = tv.ty.errorUnionPayload(); + const payload_type = tv.ty.errorUnionPayload(mod); const is_pl = tv.val.errorUnionIsPayload(); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4130,7 +4108,7 @@ pub const DeclGen = struct { const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true); - const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(); + const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // In this case, we represent pointer to error union the same as pointer // to the payload. @@ -5368,7 +5346,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const parent_bb = self.context.createBasicBlock("Block"); - if (inst_ty.isNoReturn()) { + if (inst_ty.isNoReturn(mod)) { try self.genBody(body); return null; } @@ -5490,11 +5468,11 @@ pub const FuncGen = struct { is_unused: bool, ) !?*llvm.Value { const mod = fg.dg.module; - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const is_err = err: { const err_set_ty = try fg.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); @@ -5601,6 +5579,7 @@ pub const FuncGen = struct { } fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; @@ -5616,7 +5595,7 @@ pub const FuncGen = struct { // would have been emitted already. Also the main loop in genBody can // be while(true) instead of for(body), which will eliminate 1 branch on // a hot path. 
- if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn()) { + if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) { _ = self.builder.buildBr(loop_block); } return null; @@ -6674,11 +6653,11 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const err_set_ty = try self.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); - if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const llvm_i1 = self.context.intType(1); switch (op) { .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 @@ -6825,7 +6804,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { return operand; @@ -6836,7 +6815,7 @@ pub const FuncGen = struct { const err_set_llvm_ty = try self.dg.lowerType(Type.anyerror); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (!operand_is_ptr) return operand; return self.builder.buildLoad(err_set_llvm_ty, operand, ""); @@ -6859,7 +6838,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand).childType(mod); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) }); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); @@ -6968,7 +6947,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_un_ty = self.typeOfIndex(inst); - const payload_ty = err_un_ty.errorUnionPayload(); + const payload_ty = err_un_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; @@ -8787,13 +8766,14 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const error_set_ty = self.air.getRefType(ty_op.ty); - const names = error_set_ty.errorSetNames(); + const names = error_set_ty.errorSetNames(mod); const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid"); const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid"); const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); - for (names) |name| { + for (names) |name_ip| { + const name = mod.intern_pool.stringToSlice(name_ip); const err_int = mod.global_error_set.get(name).?; const this_tag_int_value = try self.dg.lowerValue(.{ .ty = Type.err_int, @@ -11095,7 +11075,7 @@ fn isByRef(ty: Type, mod: *Module) bool { else => return ty.hasRuntimeBits(mod), }, .ErrorUnion => { - const 
payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index eada74e6d4db..612ac1f2527d 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -801,7 +801,7 @@ pub const DeclGen = struct { }, }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); const is_pl = val.errorUnionIsPayload(); const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); @@ -1365,7 +1365,7 @@ pub const DeclGen = struct { .Union => return try self.resolveUnionType(ty, null), .ErrorSet => return try self.intType(.unsigned, 16), .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); const error_ty_ref = try self.resolveType(Type.anyerror, .indirect); const eu_layout = self.errorUnionLayout(payload_ty); @@ -2875,7 +2875,7 @@ pub const DeclGen = struct { const eu_layout = self.errorUnionLayout(payload_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const err_id = if (eu_layout.payload_has_bits) try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex()) else @@ -2929,12 +2929,12 @@ pub const DeclGen = struct { const err_union_ty = self.typeOf(ty_op.operand); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); - if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { // No error possible, so just return undefined. return try self.spv.constUndef(err_ty_ref); } - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const eu_layout = self.errorUnionLayout(payload_ty); if (!eu_layout.payload_has_bits) { @@ -2948,9 +2948,10 @@ pub const DeclGen = struct { fn airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_union_ty = self.typeOfIndex(inst); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const operand_id = try self.resolve(ty_op.operand); const eu_layout = self.errorUnionLayout(payload_ty); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index f4f19f30d024..4d8e86562235 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -18,6 +18,7 @@ const LinkBlock = File.LinkBlock; const LinkFn = File.LinkFn; const LinkerLoad = @import("../codegen.zig").LinkerLoad; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const StringTable = @import("strtab.zig").StringTable; const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; @@ -518,9 +519,9 @@ pub const DeclState = struct { ); }, .ErrorUnion => { - const error_ty = ty.errorUnionSet(); - const payload_ty = ty.errorUnionPayload(); - const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(mod); + const error_ty = ty.errorUnionSet(mod); + const payload_ty = ty.errorUnionPayload(mod); + const payload_align = if (payload_ty.isNoReturn(mod)) 0 else payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); const abi_size = ty.abiSize(mod); const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) 
else 0; @@ -534,7 +535,7 @@ pub const DeclState = struct { const name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.writer().print("{s}\x00", .{name}); - if (!payload_ty.isNoReturn()) { + if (!payload_ty.isNoReturn(mod)) { // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(7); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -1266,10 +1267,11 @@ pub fn commitDeclState( const symbol = &decl_state.abbrev_table.items[sym_index]; const ty = symbol.type; const deferred: bool = blk: { - if (ty.isAnyError()) break :blk true; - switch (ty.tag()) { - .error_set_inferred => { - if (!ty.castTag(.error_set_inferred).?.data.is_resolved) break :blk true; + if (ty.isAnyError(mod)) break :blk true; + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .inferred_error_set_type => |ies_index| { + const ies = mod.inferredErrorSetPtr(ies_index); + if (!ies.is_resolved) break :blk true; }, else => {}, } @@ -1290,10 +1292,11 @@ pub fn commitDeclState( const symbol = decl_state.abbrev_table.items[target]; const ty = symbol.type; const deferred: bool = blk: { - if (ty.isAnyError()) break :blk true; - switch (ty.tag()) { - .error_set_inferred => { - if (!ty.castTag(.error_set_inferred).?.data.is_resolved) break :blk true; + if (ty.isAnyError(mod)) break :blk true; + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .inferred_error_set_type => |ies_index| { + const ies = mod.inferredErrorSetPtr(ies_index); + if (!ies.is_resolved) break :blk true; }, else => {}, } @@ -2529,18 +2532,22 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { defer arena_alloc.deinit(); const arena = arena_alloc.allocator(); - const error_set = try arena.create(Module.ErrorSet); - const error_ty = try Type.Tag.error_set.create(arena, error_set); - var names = Module.ErrorSet.NameMap{}; - try names.ensureUnusedCapacity(arena, module.global_error_set.count()); - var it = module.global_error_set.keyIterator(); - while (it.next()) |key| { - names.putAssumeCapacityNoClobber(key.*, {}); + // TODO: don't create a zig type for this, just make the dwarf info + // without touching the zig type system. 
+ const names = try arena.alloc(InternPool.NullTerminatedString, module.global_error_set.count()); + { + var it = module.global_error_set.keyIterator(); + var i: usize = 0; + while (it.next()) |key| : (i += 1) { + names[i] = module.intern_pool.getString(key.*).unwrap().?; + } } - error_set.names = names; + std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan); + + const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } }); var dbg_info_buffer = std.ArrayList(u8).init(arena); - try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer); + try addDbgInfoErrorSet(arena, module, error_ty.toType(), self.target, &dbg_info_buffer); const di_atom_index = try self.createAtom(.di_atom); log.debug("updateDeclDebugInfoAllocation in flushModule", .{}); @@ -2684,8 +2691,9 @@ fn addDbgInfoErrorSet( // DW.AT.const_value, DW.FORM.data8 mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian); - const error_names = ty.errorSetNames(); - for (error_names) |error_name| { + const error_names = ty.errorSetNames(mod); + for (error_names) |error_name_ip| { + const error_name = mod.intern_pool.stringToSlice(error_name_ip); const kv = mod.getErrorValue(error_name) catch unreachable; // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64)); diff --git a/src/print_air.zig b/src/print_air.zig index 8cff41777094..0e4f2d16cf2f 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -370,7 +370,6 @@ const Writer = struct { .none => switch (ty.tag()) { .inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"), .inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"), - else => try ty.print(s, w.module), }, else => try ty.print(s, w.module), } diff --git a/src/type.zig b/src/type.zig index ebe3d52b05f9..4e90cbd34d89 100644 --- a/src/type.zig +++ b/src/type.zig @@ -36,17 +36,9 @@ pub const Type = struct { pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { switch (ty.ip_index) { .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => return .ErrorSet, - .inferred_alloc_const, .inferred_alloc_mut, => return .Pointer, - - .error_union => return .ErrorUnion, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => .Int, @@ -55,6 +47,7 @@ pub const Type = struct { .vector_type => .Vector, .opt_type => .Optional, .error_union_type => .ErrorUnion, + .error_set_type, .inferred_error_set_type => .ErrorSet, .struct_type, .anon_struct_type => .Struct, .union_type => .Union, .opaque_type => .Opaque, @@ -130,9 +123,9 @@ pub const Type = struct { } } - pub fn baseZigTypeTag(self: Type, mod: *const Module) std.builtin.TypeId { + pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { return switch (self.zigTypeTag(mod)) { - .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod), + .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), .Optional => { return self.optionalChild(mod).baseZigTypeTag(mod); }, @@ -294,35 +287,6 @@ pub const Type = struct { if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { - .error_set_inferred => { - // Inferred error sets are only equal if both are inferred - // and they share the same pointer. 
- const a_ies = a.castTag(.error_set_inferred).?.data; - const b_ies = (b.castTag(.error_set_inferred) orelse return false).data; - return a_ies == b_ies; - }, - - .error_set, - .error_set_single, - .error_set_merged, - => { - switch (b.tag()) { - .error_set, .error_set_single, .error_set_merged => {}, - else => return false, - } - - // Two resolved sets match if their error set names match. - // Since they are pre-sorted we compare them element-wise. - const a_set = a.errorSetNames(); - const b_set = b.errorSetNames(); - if (a_set.len != b_set.len) return false; - for (a_set, 0..) |a_item, i| { - const b_item = b_set[i]; - if (!std.mem.eql(u8, a_item, b_item)) return false; - } - return true; - }, - .inferred_alloc_const, .inferred_alloc_mut, => { @@ -367,20 +331,6 @@ pub const Type = struct { return true; }, - - .error_union => { - if (b.zigTypeTag(mod) != .ErrorUnion) return false; - - const a_set = a.errorUnionSet(); - const b_set = b.errorUnionSet(); - if (!a_set.eql(b_set, mod)) return false; - - const a_payload = a.errorUnionPayload(); - const b_payload = b.errorUnionPayload(); - if (!a_payload.eql(b_payload, mod)) return false; - - return true; - }, } } @@ -399,28 +349,6 @@ pub const Type = struct { return; } switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_merged, - => { - // all are treated like an "error set" for hashing - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.error_set); - - const names = ty.errorSetNames(); - std.hash.autoHash(hasher, names.len); - assert(std.sort.isSorted([]const u8, names, u8, std.mem.lessThan)); - for (names) |name| hasher.update(name); - }, - - .error_set_inferred => { - // inferred error sets are compared using their data pointer - const ies: *Module.Fn.InferredErrorSet = ty.castTag(.error_set_inferred).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.error_set_inferred); - std.hash.autoHash(hasher, ies); - }, - .inferred_alloc_const, .inferred_alloc_mut, => { @@ -439,16 +367,6 @@ pub const Type = struct { std.hash.autoHash(hasher, info.@"volatile"); std.hash.autoHash(hasher, info.size); }, - - .error_union => { - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion); - - const set_ty = ty.errorUnionSet(); - hashWithHasher(set_ty, hasher, mod); - - const payload_ty = ty.errorUnionPayload(); - hashWithHasher(payload_ty, hasher, mod); - }, } } @@ -484,52 +402,6 @@ pub const Type = struct { } }; - pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type { - if (self.ip_index != .none) { - return Type{ .ip_index = self.ip_index, .legacy = undefined }; - } - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return Type{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, - }; - } else switch (self.legacy.ptr_otherwise.tag) { - .inferred_alloc_const, - .inferred_alloc_mut, - => unreachable, - - .error_union => { - const payload = self.castTag(.error_union).?.data; - return Tag.error_union.create(allocator, .{ - .error_set = try payload.error_set.copy(allocator), - .payload = try payload.payload.copy(allocator), - }); - }, - .error_set_merged => { - const names = self.castTag(.error_set_merged).?.data.keys(); - var duped_names = Module.ErrorSet.NameMap{}; - try duped_names.ensureTotalCapacity(allocator, names.len); - for (names) |name| { - duped_names.putAssumeCapacityNoClobber(name, {}); - } - return Tag.error_set_merged.create(allocator, duped_names); - }, - 
.error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), - .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), - .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), - } - } - - fn copyPayloadShallow(self: Type, allocator: Allocator, comptime T: type) error{OutOfMemory}!Type { - const payload = self.cast(T).?; - const new_payload = try allocator.create(T); - new_payload.* = payload.*; - return Type{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - } - pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { _ = ty; _ = unused_fmt_string; @@ -575,62 +447,7 @@ pub const Type = struct { ) @TypeOf(writer).Error!void { _ = options; comptime assert(unused_format_string.len == 0); - if (start_type.ip_index != .none) { - return writer.print("(intern index: {d})", .{@enumToInt(start_type.ip_index)}); - } - if (true) { - // This is disabled to work around a stage2 bug where this function recursively - // causes more generic function instantiations resulting in an infinite loop - // in the compiler. - try writer.writeAll("[TODO fix internal compiler bug regarding dump]"); - return; - } - var ty = start_type; - while (true) { - const t = ty.tag(); - switch (t) { - .error_union => { - const payload = ty.castTag(.error_union).?.data; - try payload.error_set.dump("", .{}, writer); - try writer.writeAll("!"); - ty = payload.payload; - continue; - }, - .error_set => { - const names = ty.castTag(.error_set).?.data.names.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - return; - }, - .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data.func; - return writer.print("({s} func={d})", .{ - @tagName(t), func.owner_decl, - }); - }, - .error_set_merged => { - const names = ty.castTag(.error_set_merged).?.data.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - return; - }, - .error_set_single => { - const name = ty.castTag(.error_set_single).?.data; - return writer.print("error{{{s}}}", .{name}); - }, - .inferred_alloc_const => return writer.writeAll("(inferred_alloc_const)"), - .inferred_alloc_mut => return writer.writeAll("(inferred_alloc_mut)"), - } - unreachable; - } + return writer.print("{any}", .{start_type.ip_index}); } pub const nameAllocArena = nameAlloc; @@ -648,45 +465,6 @@ pub const Type = struct { .none => switch (ty.tag()) { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - - .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data.func; - - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(func.owner_decl); - try owner_decl.renderFullyQualifiedName(mod, writer); - try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); - }, - - .error_union => { - const error_union = ty.castTag(.error_union).?.data; - try print(error_union.error_set, writer, mod); - try writer.writeAll("!"); - try print(error_union.payload, writer, mod); - }, - - .error_set => { - const names = ty.castTag(.error_set).?.data.names.keys(); - try writer.writeAll("error{"); - for (names, 0..) 
|name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - }, - .error_set_single => { - const name = ty.castTag(.error_set_single).?.data; - return writer.print("error{{{s}}}", .{name}); - }, - .error_set_merged => { - const names = ty.castTag(.error_set_merged).?.data.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { @@ -766,6 +544,24 @@ pub const Type = struct { try print(error_union_type.payload_type.toType(), writer, mod); return; }, + .inferred_error_set_type => |index| { + const ies = mod.inferredErrorSetPtr(index); + const func = ies.func; + + try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); + const owner_decl = mod.declPtr(func.owner_decl); + try owner_decl.renderFullyQualifiedName(mod, writer); + try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + }, + .error_set_type => |error_set_type| { + const names = error_set_type.names; + try writer.writeAll("error{"); + for (names, 0..) |name, i| { + if (i != 0) try writer.writeByte(','); + try writer.writeAll(mod.intern_pool.stringToSlice(name)); + } + try writer.writeAll("}"); + }, .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => |struct_type| { if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { @@ -881,13 +677,8 @@ pub const Type = struct { return ty.ip_index; } - pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { - if (self.ip_index != .none) return self.ip_index.toValue(); - switch (self.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - else => return Value.Tag.ty.create(allocator, self), - } + pub fn toValue(self: Type) Value { + return self.toIntern().toValue(); } const RuntimeBitsError = Module.CompileError || error{NeedLazy}; @@ -914,14 +705,6 @@ pub const Type = struct { .empty_struct_type => return false, .none => switch (ty.tag()) { - .error_set_inferred, - - .error_set_single, - .error_union, - .error_set, - .error_set_merged, - => return true, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -951,7 +734,7 @@ pub const Type = struct { }, .opt_type => |child| { const child_ty = child.toType(); - if (child_ty.isNoReturn()) { + if (child_ty.isNoReturn(mod)) { // Then the optional is comptime-known to be null. return false; } @@ -963,7 +746,10 @@ pub const Type = struct { return !comptimeOnly(child_ty, mod); } }, - .error_union_type => @panic("TODO"), + .error_union_type, + .error_set_type, + .inferred_error_set_type, + => true, // These are function *bodies*, not pointers. // They return false here because they are comptime-only types. 
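// Illustrative aside (assumed, not part of this commit): the error-set prongs
// here and in the layout hunks that follow all encode the interim rule from
// the "TODO revisit" comments -- until a dedicated error tag type exists, an
// error set is represented as a 16-bit tag. For any interned error-set type:
fn sketchErrorSetAbi(ty: Type, mod: *Module) !void {
    std.debug.assert(ty.abiSize(mod) == 2); // abiSizeAdvanced: .scalar = 2
    std.debug.assert(ty.abiAlignment(mod) == 2); // abiAlignmentAdvanced: .scalar = 2
    std.debug.assert((try ty.bitSizeAdvanced(mod, null)) == 16); // bitSizeAdvanced: 16
}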
@@ -1103,112 +889,99 @@ pub const Type = struct { /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { - .empty_struct_type => false, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type, + .ptr_type, + .vector_type, + => true, - .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .error_union, - => false, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + .anon_struct_type, + .opaque_type, + .anyframe_type, + // These are function bodies, not function pointers. + .func_type, + => false, - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type, - .ptr_type, - .vector_type, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), + .opt_type => ty.isPtrLikeOptional(mod), + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .void, => true, - .error_union_type, - .anon_struct_type, - .opaque_type, - .anyframe_type, - // These are function bodies, not function pointers. - .func_type, + .anyerror, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + .generic_poison, => false, - .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), - .opt_type => ty.isPtrLikeOptional(mod), - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .void, - => true, - - .anyerror, - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - .generic_poison, - => false, - - .var_args_param => unreachable, - }, - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { - // Struct with no fields has a well-defined layout of no bits. - return true; - }; - return struct_obj.layout != .Auto; - }, - .union_type => |union_type| switch (union_type.runtime_tag) { - .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, - .tagged => false, - }, - .enum_type => |enum_type| switch (enum_type.tag_mode) { - .auto => false, - .explicit, .nonexhaustive => true, - }, - - // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .var_args_param => unreachable, + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { + // Struct with no fields has a well-defined layout of no bits. 
+ return true; + }; + return struct_obj.layout != .Auto; + }, + .union_type => |union_type| switch (union_type.runtime_tag) { + .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, + .tagged => false, }, + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .auto => false, + .explicit, .nonexhaustive => true, + }, + + // values, not types + .undef => unreachable, + .un => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .float => unreachable, + .ptr => unreachable, + .opt => unreachable, + .enum_tag => unreachable, + .aggregate => unreachable, }; } @@ -1247,35 +1020,8 @@ pub const Type = struct { }; } - pub fn isNoReturn(ty: Type) bool { - switch (@enumToInt(ty.ip_index)) { - @enumToInt(InternPool.Index.first_type)...@enumToInt(InternPool.Index.noreturn_type) - 1 => return false, - - @enumToInt(InternPool.Index.noreturn_type) => return true, - - @enumToInt(InternPool.Index.noreturn_type) + 1...@enumToInt(InternPool.Index.last_type) => return false, - - @enumToInt(InternPool.Index.first_value)...@enumToInt(InternPool.Index.last_value) => unreachable, - @enumToInt(InternPool.Index.generic_poison) => unreachable, - - // TODO add empty error sets here - // TODO add enums with no fields here - else => return false, - - @enumToInt(InternPool.Index.none) => switch (ty.tag()) { - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - const names = err_set_obj.names.keys(); - return names.len == 0; - }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - const names = name_map.keys(); - return names.len == 0; - }, - else => return false, - }, - } + pub fn isNoReturn(ty: Type, mod: *Module) bool { + return mod.intern_pool.isNoReturn(ty.ip_index); } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. @@ -1353,21 +1099,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, - .none => switch (ty.tag()) { - - // TODO revisit this when we have the concept of the error tag type - .error_set_inferred, - .error_set_single, - .error_set, - .error_set_merged, - => return AbiAlignmentAdvanced{ .scalar = 2 }, - - .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), - - .inferred_alloc_const, - .inferred_alloc_mut, - => unreachable, - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; @@ -1388,7 +1119,11 @@ pub const Type = struct { }, .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), - .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), + .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, info.payload_type.toType()), + + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return AbiAlignmentAdvanced{ .scalar = 2 }, + // represents machine code; not a pointer .func_type => |func_type| return AbiAlignmentAdvanced{ .scalar = if (func_type.alignment.toByteUnitsOptional()) |a| @@ -1572,14 +1307,14 @@ pub const Type = struct { ty: Type, mod: *Module, strat: AbiAlignmentAdvancedStrat, + payload_ty: Type, ) Module.CompileError!AbiAlignmentAdvanced { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. 
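// Rationale note (an inference, not stated in the commit message): with error
// unions interned there is no longer a `castTag(.error_union)` payload to
// unwrap here, so the caller now reads `payload_type` out of the InternPool
// key and passes it in as the new `payload_ty` parameter.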
- const data = ty.castTag(.error_union).?.data; const code_align = abiAlignment(Type.anyerror, mod); switch (strat) { .eager, .sema => { - if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) { @@ -1587,11 +1322,11 @@ pub const Type = struct { } return AbiAlignmentAdvanced{ .scalar = @max( code_align, - (try data.payload.abiAlignmentAdvanced(mod, strat)).scalar, + (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, ) }; }, .lazy => |arena| { - switch (try data.payload.abiAlignmentAdvanced(mod, strat)) { + switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |payload_align| { return AbiAlignmentAdvanced{ .scalar = @max(code_align, payload_align), @@ -1728,55 +1463,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, - .none => switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - // TODO revisit this when we have the concept of the error tag type - .error_set_inferred, - .error_set, - .error_set_merged, - .error_set_single, - => return AbiSizeAdvanced{ .scalar = 2 }, - - .error_union => { - // This code needs to be kept in sync with the equivalent switch prong - // in abiAlignmentAdvanced. - const data = ty.castTag(.error_union).?.data; - const code_size = abiSize(Type.anyerror, mod); - if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, - else => |e| return e, - })) { - // Same as anyerror. 
- return AbiSizeAdvanced{ .scalar = code_size }; - } - const code_align = abiAlignment(Type.anyerror, mod); - const payload_align = abiAlignment(data.payload, mod); - const payload_size = switch (try data.payload.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - }; - - var size: u64 = 0; - if (code_align > payload_align) { - size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); - } else { - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); - size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); - } - return AbiSizeAdvanced{ .scalar = size }; - }, - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; @@ -1816,12 +1502,52 @@ pub const Type = struct { .val = try Value.Tag.lazy_size.create(strat.lazy, ty), }, }; - const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); - return AbiSizeAdvanced{ .scalar = result }; - }, + const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + return AbiSizeAdvanced{ .scalar = result }; + }, + + .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), + + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return AbiSizeAdvanced{ .scalar = 2 }, + + .error_union_type => |error_union_type| { + const payload_ty = error_union_type.payload_type.toType(); + // This code needs to be kept in sync with the equivalent switch prong + // in abiAlignmentAdvanced. + const code_size = abiSize(Type.anyerror, mod); + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + else => |e| return e, + })) { + // Same as anyerror. 
+ return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(payload_ty, mod); + const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + }, + }; - .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), - .error_union_type => @panic("TODO"), + var size: u64 = 0; + if (code_align > payload_align) { + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + } else { + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + } + return AbiSizeAdvanced{ .scalar = size }; + }, .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { .bool, @@ -1982,7 +1708,7 @@ pub const Type = struct { ) Module.CompileError!AbiSizeAdvanced { const child_ty = ty.optionalChild(mod); - if (child_ty.isNoReturn()) { + if (child_ty.isNoReturn(mod)) { return AbiSizeAdvanced{ .scalar = 0 }; } @@ -2041,147 +1767,137 @@ pub const Type = struct { const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => return 16, // TODO revisit this when we have the concept of the error tag type + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits, + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth() * 2, + }, + .anyframe_type => return target.ptrBitWidth(), + + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + if (len == 0) return 0; + const elem_ty = array_type.child.toType(); + const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, + .vector_type => |vector_type| { + const child_ty = vector_type.child.toType(); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + }, + .opt_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return 16, + + .error_union_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. 
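// Worked example (assumed numbers, not from this commit) for the
// `.error_union_type` size computation above: take `anyerror!u64` on a 64-bit
// target. code_size = abiSize(anyerror) = 2 and code_align = 2, while the
// payload has size 8 and alignment 8. code_align <= payload_align, so the
// payload is laid out first: size = 8; align forward to code_align -> 8;
// add code_size -> 10; align forward to payload_align -> 16. The error union
// therefore occupies 16 bytes, six of which are padding.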
+ return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, + + .usize, + .isize, + => return target.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, - .error_union => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, + // TODO revisit this when we have the concept of the error tag type + .anyerror => return 16, + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + .var_args_param => unreachable, + + .atomic_order => unreachable, // missing call to resolveTypeFields + .atomic_rmw_op => unreachable, // missing call to resolveTypeFields + .calling_convention => unreachable, // missing call to resolveTypeFields + .address_space => unreachable, // missing call to resolveTypeFields + .float_mode => unreachable, // missing call to resolveTypeFields + .reduce_op => unreachable, // missing call to resolveTypeFields + .call_modifier => unreachable, // missing call to resolveTypeFields + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + .type_info => unreachable, // missing call to resolveTypeFields }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.bits, - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth() * 2, - }, - .anyframe_type => return target.ptrBitWidth(), - - .array_type => |array_type| { - const len = array_type.len + @boolToInt(array_type.sentinel != .none); - if (len == 0) return 0; - const elem_ty = array_type.child.toType(); - const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); - if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); - return (len - 1) * 8 * elem_size + elem_bit_size; - }, - .vector_type => |vector_type| { - const child_ty = vector_type.child.toType(); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); - return elem_bit_size * vector_type.len; - }, - .opt_type => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. 
- return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, - .error_union_type => @panic("TODO"), - .func_type => unreachable, // represents machine code; not a pointer - .simple_type => |t| switch (t) { - .f16 => return 16, - .f32 => return 32, - .f64 => return 64, - .f80 => return 80, - .f128 => return 128, - - .usize, - .isize, - => return target.ptrBitWidth(), - - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), - - .bool => return 1, - .void => return 0, - - // TODO revisit this when we have the concept of the error tag type - .anyerror => return 16, - - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .comptime_float => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, - .generic_poison => unreachable, - .var_args_param => unreachable, - - .atomic_order => unreachable, // missing call to resolveTypeFields - .atomic_rmw_op => unreachable, // missing call to resolveTypeFields - .calling_convention => unreachable, // missing call to resolveTypeFields - .address_space => unreachable, // missing call to resolveTypeFields - .float_mode => unreachable, // missing call to resolveTypeFields - .reduce_op => unreachable, // missing call to resolveTypeFields - .call_modifier => unreachable, // missing call to resolveTypeFields - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - .type_info => unreachable, // missing call to resolveTypeFields - }, - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; - if (struct_obj.layout != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); - assert(struct_obj.haveLayout()); - return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); - }, - - .anon_struct_type => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; + if (struct_obj.layout != .Packed) { return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - }, + } + if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); + assert(struct_obj.haveLayout()); + return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); + }, - .union_type => |union_type| { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout(mod) != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - const union_obj = mod.unionPtr(union_type.index); - assert(union_obj.haveFieldTypes()); + .anon_struct_type => { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + }, - var size: u64 = 0; - for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, mod, 
opt_sema)); - } - return size; - }, - .opaque_type => unreachable, - .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), + .union_type => |union_type| { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + if (ty.containerLayout(mod) != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + const union_obj = mod.unionPtr(union_type.index); + assert(union_obj.haveFieldTypes()); - // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + var size: u64 = 0; + for (union_obj.fields.values()) |field| { + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); + } + return size; }, + .opaque_type => unreachable, + .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), + + // values, not types + .undef => unreachable, + .un => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .float => unreachable, + .ptr => unreachable, + .opt => unreachable, + .enum_tag => unreachable, + .aggregate => unreachable, } } @@ -2210,7 +1926,7 @@ pub const Type = struct { return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); return payload_ty.layoutIsResolved(mod); }, else => return true, @@ -2223,8 +1939,6 @@ pub const Type = struct { .inferred_alloc_const, .inferred_alloc_mut, => true, - - else => false, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_info| ptr_info.size == .One, @@ -2245,8 +1959,6 @@ pub const Type = struct { .inferred_alloc_const, .inferred_alloc_mut, => .One, - - else => null, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_info| ptr_info.size, @@ -2534,69 +2246,43 @@ pub const Type = struct { } /// Asserts that the type is an error union. - pub fn errorUnionPayload(ty: Type) Type { - return switch (ty.ip_index) { - .anyerror_void_error_union_type => Type.void, - .none => switch (ty.tag()) { - .error_union => ty.castTag(.error_union).?.data.payload, - else => unreachable, - }, - else => @panic("TODO"), - }; + pub fn errorUnionPayload(ty: Type, mod: *Module) Type { + return mod.intern_pool.indexToKey(ty.ip_index).error_union_type.payload_type.toType(); } - pub fn errorUnionSet(ty: Type) Type { - return switch (ty.ip_index) { - .anyerror_void_error_union_type => Type.anyerror, - .none => switch (ty.tag()) { - .error_union => ty.castTag(.error_union).?.data.error_set, - else => unreachable, - }, - else => @panic("TODO"), - }; + /// Asserts that the type is an error union. + pub fn errorUnionSet(ty: Type, mod: *Module) Type { + return mod.intern_pool.indexToKey(ty.ip_index).error_union_type.error_set_type.toType(); } /// Returns false for unresolved inferred error sets. 
- pub fn errorSetIsEmpty(ty: Type, mod: *const Module) bool { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .error_set_inferred => { - const inferred_error_set = ty.castTag(.error_set_inferred).?.data; + pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { + return switch (ty.ip_index) { + .anyerror_type => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + .inferred_error_set_type => |index| { + const inferred_error_set = mod.inferredErrorSetPtr(index); // Can't know for sure. if (!inferred_error_set.is_resolved) return false; if (inferred_error_set.is_anyerror) return false; return inferred_error_set.errors.count() == 0; }, - .error_set_single => return false, - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - return err_set_obj.names.count() == 0; - }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - return name_map.count() == 0; - }, else => unreachable, }, - .anyerror_type => return false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - else => @panic("TODO"), - }, - } + }; } /// Returns true if it is an error set that includes anyerror, false otherwise. /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. - pub fn isAnyError(ty: Type) bool { + pub fn isAnyError(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror, + .anyerror_type => true, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror, else => false, }, - .anyerror_type => true, - // TODO handle error_set_inferred here - else => false, }; } @@ -2610,30 +2296,50 @@ pub const Type = struct { /// Returns whether ty, which must be an error set, includes an error `name`. /// Might return a false negative if `ty` is an inferred error set and not fully /// resolved yet. - pub fn errorSetHasField(ty: Type, name: []const u8) bool { - if (ty.isAnyError()) { - return true; - } - - switch (ty.tag()) { - .error_set_single => { - const data = ty.castTag(.error_set_single).?.data; - return std.mem.eql(u8, data, name); - }, - .error_set_inferred => { - const data = ty.castTag(.error_set_inferred).?.data; - return data.errors.contains(name); - }, - .error_set_merged => { - const data = ty.castTag(.error_set_merged).?.data; - return data.contains(name); + pub fn errorSetHasFieldIp( + ip: *const InternPool, + ty: InternPool.Index, + name: InternPool.NullTerminatedString, + ) bool { + return switch (ty) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| { + return error_set_type.nameIndex(ip, name) != null; + }, + .inferred_error_set_type => |index| { + const ies = ip.inferredErrorSetPtrConst(index); + if (ies.is_anyerror) return true; + return ies.errors.contains(name); + }, + else => unreachable, }, - .error_set => { - const data = ty.castTag(.error_set).?.data; - return data.names.contains(name); + }; + } + + /// Returns whether ty, which must be an error set, includes an error `name`. + /// Might return a false negative if `ty` is an inferred error set and not fully + /// resolved yet. 
+ pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.ip_index) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty.ip_index)) { + .error_set_type => |error_set_type| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return error_set_type.nameIndex(ip, field_name_interned) != null; + }, + .inferred_error_set_type => |index| { + const ies = ip.inferredErrorSetPtr(index); + if (ies.is_anyerror) return true; + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return ies.errors.contains(field_name_interned); + }, + else => unreachable, }, - else => unreachable, - } + }; } /// Asserts the type is an array or vector or struct. @@ -2727,14 +2433,6 @@ pub const Type = struct { var ty = starting_ty; while (true) switch (ty.ip_index) { - .none => switch (ty.tag()) { - .error_set, .error_set_single, .error_set_inferred, .error_set_merged => { - // TODO revisit this when error sets support custom int types - return .{ .signedness = .unsigned, .bits = 16 }; - }, - - else => unreachable, - }, .anyerror_type => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; @@ -2760,6 +2458,9 @@ pub const Type = struct { .enum_type => |enum_type| ty = enum_type.tag_ty.toType(), .vector_type => |vector_type| ty = vector_type.child.toType(), + // TODO revisit this when error sets support custom int types + .error_set_type, .inferred_error_set_type => return .{ .signedness = .unsigned, .bits = 16 }, + .anon_struct_type => unreachable, .ptr_type => unreachable, @@ -2932,13 +2633,6 @@ pub const Type = struct { .empty_struct_type => return Value.empty_struct, .none => switch (ty.tag()) { - .error_union, - .error_set_single, - .error_set, - .error_set_merged, - .error_set_inferred, - => return null, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -2955,6 +2649,8 @@ pub const Type = struct { .error_union_type, .func_type, .anyframe_type, + .error_set_type, + .inferred_error_set_type, => return null, .array_type => |array_type| { @@ -3130,18 +2826,6 @@ pub const Type = struct { return switch (ty.ip_index) { .empty_struct_type => false, - .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => false, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, .ptr_type => |ptr_type| { @@ -3160,6 +2844,11 @@ pub const Type = struct { .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), .opt_type => |child| child.toType().comptimeOnly(mod), .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod), + + .error_set_type, + .inferred_error_set_type, + => false, + // These are function bodies, not function pointers. .func_type => true, @@ -3418,17 +3107,11 @@ pub const Type = struct { } // Asserts that `ty` is an error set and not `anyerror`. 
- pub fn errorSetNames(ty: Type) []const []const u8 { - return switch (ty.tag()) { - .error_set_single => blk: { - // Work around coercion problems - const tmp: *const [1][]const u8 = &ty.castTag(.error_set_single).?.data; - break :blk tmp; - }, - .error_set_merged => ty.castTag(.error_set_merged).?.data.keys(), - .error_set => ty.castTag(.error_set).?.data.names.keys(), - .error_set_inferred => { - const inferred_error_set = ty.castTag(.error_set_inferred).?.data; + pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .error_set_type => |x| x.names, + .inferred_error_set_type => |index| { + const inferred_error_set = mod.inferredErrorSetPtr(index); assert(inferred_error_set.is_resolved); assert(!inferred_error_set.is_anyerror); return inferred_error_set.errors.keys(); @@ -3437,26 +3120,6 @@ pub const Type = struct { }; } - /// Merge lhs with rhs. - /// Asserts that lhs and rhs are both error sets and are resolved. - pub fn errorSetMerge(lhs: Type, arena: Allocator, rhs: Type) !Type { - const lhs_names = lhs.errorSetNames(); - const rhs_names = rhs.errorSetNames(); - var names: Module.ErrorSet.NameMap = .{}; - try names.ensureUnusedCapacity(arena, lhs_names.len); - for (lhs_names) |name| { - names.putAssumeCapacityNoClobber(name, {}); - } - for (rhs_names) |name| { - try names.put(arena, name, {}); - } - - // names must be sorted - Module.ErrorSet.sortNames(&names); - - return try Tag.error_set_merged.create(arena, names); - } - pub fn enumFields(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names; } @@ -3748,30 +3411,19 @@ pub const Type = struct { } pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc { - switch (ty.ip_index) { - .empty_struct_type => return null, - .none => switch (ty.tag()) { - .error_set => { - const error_set = ty.castTag(.error_set).?.data; - return error_set.srcLoc(mod); - }, - - else => return null, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.srcLoc(mod); }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - return struct_obj.srcLoc(mod); - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.srcLoc(mod); - }, - .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), - .enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod), - else => null, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.srcLoc(mod); }, - } + .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), + .enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod), + else => null, + }; } pub fn getOwnerDecl(ty: Type, mod: *Module) Module.Decl.Index { @@ -3779,39 +3431,25 @@ pub const Type = struct { } pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .error_set => { - const error_set = ty.castTag(.error_set).?.data; - return error_set.owner_decl; - }, - - else => return null, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null; + return 
struct_obj.owner_decl; }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null; - return struct_obj.owner_decl; - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.owner_decl; - }, - .opaque_type => |opaque_type| opaque_type.decl, - .enum_type => |enum_type| enum_type.decl, - else => null, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.owner_decl; }, - } + .opaque_type => |opaque_type| opaque_type.decl, + .enum_type => |enum_type| enum_type.decl, + else => null, + }; } pub fn isGenericPoison(ty: Type) bool { return ty.ip_index == .generic_poison_type; } - pub fn isBoundFn(ty: Type) bool { - return ty.ip_index == .none and ty.tag() == .bound_fn; - } - /// This enum does not directly correspond to `std.builtin.TypeId` because /// it has extra enum tags in it, as a way of using less memory. For example, /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types @@ -3827,54 +3465,8 @@ pub const Type = struct { inferred_alloc_const, // See last_no_payload_tag below. // After this, the tag requires a payload. - error_union, - error_set, - error_set_single, - /// The type is the inferred error set of a specific function. - error_set_inferred, - error_set_merged, - pub const last_no_payload_tag = Tag.inferred_alloc_const; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; - - pub fn Type(comptime t: Tag) type { - return switch (t) { - .inferred_alloc_const, - .inferred_alloc_mut, - => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), - - .error_set => Payload.ErrorSet, - .error_set_inferred => Payload.ErrorSetInferred, - .error_set_merged => Payload.ErrorSetMerged, - - .error_union => Payload.ErrorUnion, - .error_set_single => Payload.Name, - }; - } - - pub fn init(comptime t: Tag) file_struct.Type { - comptime std.debug.assert(@enumToInt(t) < Tag.no_payload_count); - return file_struct.Type{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = t }, - }; - } - - pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type { - const p = try ally.create(t.Type()); - p.* = .{ - .base = .{ .tag = t }, - .data = data, - }; - return file_struct.Type{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &p.base }, - }; - } - - pub fn Data(comptime t: Tag) type { - return std.meta.fieldInfo(t.Type(), .data).type; - } }; pub fn isTuple(ty: Type, mod: *Module) bool { @@ -3928,37 +3520,6 @@ pub const Type = struct { pub const Payload = struct { tag: Tag, - pub const Len = struct { - base: Payload, - data: u64, - }; - - pub const Bits = struct { - base: Payload, - data: u16, - }; - - pub const ErrorSet = struct { - pub const base_tag = Tag.error_set; - - base: Payload = Payload{ .tag = base_tag }, - data: *Module.ErrorSet, - }; - - pub const ErrorSetMerged = struct { - pub const base_tag = Tag.error_set_merged; - - base: Payload = Payload{ .tag = base_tag }, - data: Module.ErrorSet.NameMap, - }; - - pub const ErrorSetInferred = struct { - pub const base_tag = Tag.error_set_inferred; - - base: Payload = Payload{ .tag = base_tag }, - data: *Module.Fn.InferredErrorSet, - }; - /// TODO: remove this data structure since we have `InternPool.Key.PtrType`. 
pub const Pointer = struct { data: Data, @@ -4010,27 +3571,6 @@ pub const Type = struct { } }; }; - - pub const ErrorUnion = struct { - pub const base_tag = Tag.error_union; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - error_set: Type, - payload: Type, - }, - }; - - pub const Decl = struct { - base: Payload, - data: *Module.Decl, - }; - - pub const Name = struct { - base: Payload, - /// memory is owned by `Module` - data: []const u8, - }; }; pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; @@ -4164,19 +3704,6 @@ pub const Type = struct { return mod.optionalType(child_type.ip_index); } - pub fn errorUnion( - arena: Allocator, - error_set: Type, - payload: Type, - mod: *Module, - ) Allocator.Error!Type { - assert(error_set.zigTypeTag(mod) == .ErrorSet); - return Type.Tag.error_union.create(arena, .{ - .error_set = error_set, - .payload = payload, - }); - } - pub fn smallestUnsignedBits(max: u64) u16 { if (max == 0) return 0; const base = std.math.log2(max); diff --git a/src/value.zig b/src/value.zig index 310049608599..4408d1023115 100644 --- a/src/value.zig +++ b/src/value.zig @@ -260,7 +260,7 @@ pub const Value = struct { const new_payload = try arena.create(Payload.Ty); new_payload.* = .{ .base = payload.base, - .data = try payload.data.copy(arena), + .data = payload.data, }; return Value{ .ip_index = .none, @@ -281,7 +281,7 @@ pub const Value = struct { .base = payload.base, .data = .{ .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = try payload.data.container_ty.copy(arena), + .container_ty = payload.data.container_ty, }, }; return Value{ @@ -296,7 +296,7 @@ pub const Value = struct { .base = payload.base, .data = .{ .field_val = try payload.data.field_val.copy(arena), - .field_ty = try payload.data.field_ty.copy(arena), + .field_ty = payload.data.field_ty, }, }; return Value{ @@ -311,7 +311,7 @@ pub const Value = struct { .base = payload.base, .data = .{ .array_ptr = try payload.data.array_ptr.copy(arena), - .elem_ty = try payload.data.elem_ty.copy(arena), + .elem_ty = payload.data.elem_ty, .index = payload.data.index, }, }; @@ -327,7 +327,7 @@ pub const Value = struct { .base = payload.base, .data = .{ .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = try payload.data.container_ty.copy(arena), + .container_ty = payload.data.container_ty, .field_index = payload.data.field_index, }, }; @@ -1870,7 +1870,7 @@ pub const Value = struct { .eu_payload => { const a_payload = a.castTag(.eu_payload).?.data; const b_payload = b.castTag(.eu_payload).?.data; - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); }, .eu_payload_ptr => { @@ -2163,14 +2163,14 @@ pub const Value = struct { .ErrorUnion => { if (val.tag() == .@"error") { std.hash.autoHash(hasher, false); // error - const sub_ty = ty.errorUnionSet(); + const sub_ty = ty.errorUnionSet(mod); val.hash(sub_ty, hasher, mod); return; } if (val.castTag(.eu_payload)) |payload| { std.hash.autoHash(hasher, true); // payload - const sub_ty = ty.errorUnionPayload(); + const sub_ty = ty.errorUnionPayload(mod); payload.data.hash(sub_ty, hasher, mod); return; } else unreachable; @@ -2272,7 +2272,7 @@ pub const Value = struct { payload.data.hashUncoerced(child_ty, hasher, mod); } else std.hash.autoHash(hasher, std.builtin.TypeId.Null), .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else { - const pl_ty = 
ty.errorUnionPayload(); + const pl_ty = ty.errorUnionPayload(mod); val.castTag(.eu_payload).?.data.hashUncoerced(pl_ty, hasher, mod); }, .Enum, .EnumLiteral, .Union => { From d28fc5bacb2b27ba3f2a5ed17475b9b790be3ed5 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 19 May 2023 21:35:55 -0400 Subject: [PATCH 073/205] InternPool: add repeated aggregate storage --- src/InternPool.zig | 98 ++++++++++++++++++++++++++++++++++++++++------ src/Sema.zig | 14 +++---- src/type.zig | 4 +- src/value.zig | 12 ++++-- 4 files changed, 104 insertions(+), 24 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 79506c4404b9..a16ebe64872f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -506,7 +506,12 @@ pub const Key = union(enum) { pub const Aggregate = struct { ty: Index, - fields: []const Index, + storage: Storage, + + pub const Storage = union(enum) { + elems: []const Index, + repeated_elem: Index, + }; }; pub fn hash32(key: Key) u32 { @@ -578,7 +583,14 @@ pub const Key = union(enum) { .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); - for (aggregate.fields) |field| std.hash.autoHash(hasher, field); + std.hash.autoHash(hasher, @as( + @typeInfo(Key.Aggregate.Storage).Union.tag_type.?, + aggregate.storage, + )); + switch (aggregate.storage) { + .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem), + .repeated_elem => |elem| std.hash.autoHash(hasher, elem), + } }, .error_set_type => |error_set_type| { @@ -756,7 +768,14 @@ pub const Key = union(enum) { .aggregate => |a_info| { const b_info = b.aggregate; if (a_info.ty != b_info.ty) return false; - return std.mem.eql(Index, a_info.fields, b_info.fields); + + const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; + if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) return false; + + return switch (a_info.storage) { + .elems => |a_elems| std.mem.eql(Index, a_elems, b_info.storage.elems), + .repeated_elem => |a_elem| a_elem == b_info.storage.repeated_elem, + }; }, .anon_struct_type => |a_info| { const b_info = b.anon_struct_type; @@ -1407,6 +1426,9 @@ pub const Tag = enum(u8) { /// An instance of a struct, array, or vector. /// data is extra index to `Aggregate`. aggregate, + /// An instance of an array or vector with every element being the same value. + /// data is extra index to `Repeated`. + repeated, }; /// Trailing: @@ -1446,6 +1468,13 @@ pub const Aggregate = struct { ty: Index, }; +pub const Repeated = struct { + /// The type of the aggregate. + ty: Index, + /// The value of every element. + elem_val: Index, +}; + /// Trailing: /// 0. type: Index for each fields_len /// 1. value: Index for each fields_len @@ -2049,13 +2078,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { // as the tuple case below). .struct_type => .{ .aggregate = .{ .ty = ty, - .fields = &.{}, + .storage = .{ .elems = &.{} }, } }, // There is only one possible value precisely due to the // fact that this values slice is fully populated! 
.anon_struct_type => |anon_struct_type| .{ .aggregate = .{ .ty = ty, - .fields = anon_struct_type.values, + .storage = .{ .elems = anon_struct_type.values }, } }, else => unreachable, }; @@ -2066,7 +2095,14 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]); return .{ .aggregate = .{ .ty = extra.data.ty, - .fields = fields, + .storage = .{ .elems = fields }, + } }; + }, + .repeated => { + const extra = ip.extraData(Repeated, data); + return .{ .aggregate = .{ + .ty = extra.ty, + .storage = .{ .repeated_elem = extra.elem_val }, } }; }, .union_value => .{ .un = ip.extraData(Key.Union, data) }, @@ -2663,10 +2699,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .aggregate => |aggregate| { assert(aggregate.ty != .none); - for (aggregate.fields) |elem| assert(elem != .none); - assert(aggregate.fields.len == ip.aggregateTypeLen(aggregate.ty)); + const aggregate_len = ip.aggregateTypeLen(aggregate.ty); + switch (aggregate.storage) { + .elems => |elems| { + assert(elems.len == aggregate_len); + for (elems) |elem| assert(elem != .none); + }, + .repeated_elem => |elem| { + assert(elem != .none); + }, + } - if (aggregate.fields.len == 0) { + if (aggregate_len == 0) { ip.items.appendAssumeCapacity(.{ .tag = .only_possible_value, .data = @enumToInt(aggregate.ty), @@ -2676,7 +2720,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { switch (ip.indexToKey(aggregate.ty)) { .anon_struct_type => |anon_struct_type| { - if (std.mem.eql(Index, anon_struct_type.values, aggregate.fields)) { + if (switch (aggregate.storage) { + .elems => |elems| std.mem.eql(Index, anon_struct_type.values, elems), + .repeated_elem => |elem| for (anon_struct_type.values) |value| { + if (value != elem) break false; + } else true, + }) { // This encoding works thanks to the fact that, as we just verified, // the type itself contains a slice of values that can be provided // in the aggregate fields. 
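// A minimal usage sketch for the new storage variants (assumed, not part of
// this commit), given an interned array type `array_ty` of length >= 1 and an
// interned element value `elem`:
fn sketchRepeatedAggregate(mod: *Module, array_ty: InternPool.Index, elem: InternPool.Index) !void {
    const val = try mod.intern(.{ .aggregate = .{
        .ty = array_ty,
        .storage = .{ .repeated_elem = elem }, // one element stands in for all N
    } });
    // The hunk below canonicalizes both all-equal `.elems` and `.repeated_elem`
    // into the compact `repeated` item, so the key reads back as repeated:
    switch (mod.intern_pool.indexToKey(val).aggregate.storage) {
        .repeated_elem => |e| std.debug.assert(e == elem),
        .elems => unreachable,
    }
}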
@@ -2690,9 +2739,33 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { else => {}, } + if (switch (aggregate.storage) { + .elems => |elems| for (elems[1..]) |elem| { + if (elem != elems[0]) break false; + } else true, + .repeated_elem => true, + }) { + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Repeated).Struct.fields.len, + ); + + ip.items.appendAssumeCapacity(.{ + .tag = .repeated, + .data = ip.addExtraAssumeCapacity(Repeated{ + .ty = aggregate.ty, + .elem_val = switch (aggregate.storage) { + .elems => |elems| elems[0], + .repeated_elem => |elem| elem, + }, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + try ip.extra.ensureUnusedCapacity( gpa, - @typeInfo(Aggregate).Struct.fields.len + aggregate.fields.len, + @typeInfo(Aggregate).Struct.fields.len + aggregate_len, ); ip.items.appendAssumeCapacity(.{ @@ -2701,7 +2774,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .ty = aggregate.ty, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.fields)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems)); }, .un => |un| { @@ -3417,6 +3490,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty)); break :b @sizeOf(Aggregate) + (@sizeOf(u32) * fields_len); }, + .repeated => @sizeOf(Repeated), .float_f16 => 0, .float_f32 => 0, diff --git a/src/Sema.zig b/src/Sema.zig index be505d74a3cf..bf4d13824a34 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -12671,7 +12671,7 @@ fn analyzeTupleCat( const runtime_src = opt_runtime_src orelse { const tuple_val = try mod.intern(.{ .aggregate = .{ .ty = tuple_ty, - .fields = values, + .storage = .{ .elems = values }, } }); return sema.addConstant(tuple_ty.toType(), tuple_val.toValue()); }; @@ -12989,7 +12989,7 @@ fn analyzeTupleMul( const runtime_src = opt_runtime_src orelse { const tuple_val = try mod.intern(.{ .aggregate = .{ .ty = tuple_ty, - .fields = values, + .storage = .{ .elems = values }, } }); return sema.addConstant(tuple_ty.toType(), tuple_val.toValue()); }; @@ -18227,7 +18227,7 @@ fn zirStructInitAnon( const runtime_index = opt_runtime_index orelse { const tuple_val = try mod.intern(.{ .aggregate = .{ .ty = tuple_ty, - .fields = values, + .storage = .{ .elems = values }, } }); return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; @@ -18442,7 +18442,7 @@ fn zirArrayInitAnon( const runtime_src = opt_runtime_src orelse { const tuple_val = try mod.intern(.{ .aggregate = .{ .ty = tuple_ty, - .fields = values, + .storage = .{ .elems = values }, } }); return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; @@ -29176,7 +29176,7 @@ fn coerceTupleToTuple( tuple_ty, (try mod.intern(.{ .aggregate = .{ .ty = tuple_ty.ip_index, - .fields = field_vals, + .storage = .{ .elems = field_vals }, } })).toValue(), ); } @@ -33085,7 +33085,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // one-possible-value in type.zig. const empty = try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, - .fields = &.{}, + .storage = .{ .elems = &.{} }, } }); return empty.toValue(); }, @@ -33098,7 +33098,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // therefore has one possible value. 
return (try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, - .fields = tuple.values, + .storage = .{ .elems = tuple.values }, } })).toValue(); }, diff --git a/src/type.zig b/src/type.zig index 4e90cbd34d89..019557c5e0a5 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2738,7 +2738,7 @@ pub const Type = struct { // one-possible-value logic in Sema.zig. const empty = try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, - .fields = &.{}, + .storage = .{ .elems = &.{} }, } }); return empty.toValue(); }, @@ -2751,7 +2751,7 @@ pub const Type = struct { // therefore has one possible value. return (try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, - .fields = tuple.values, + .storage = .{ .elems = tuple.values }, } })).toValue(); }, diff --git a/src/value.zig b/src/value.zig index 4408d1023115..db79fa3fe689 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2665,7 +2665,10 @@ pub const Value = struct { else => unreachable, }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .aggregate => |aggregate| aggregate.fields[index].toValue(), + .aggregate => |aggregate| switch (aggregate.storage) { + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }.toValue(), else => unreachable, }, } @@ -2753,8 +2756,11 @@ pub const Value = struct { else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .undef => return true, .simple_value => |v| if (v == .undefined) return true, - .aggregate => |aggregate| for (aggregate.fields) |field| { - if (try anyUndef(field.toValue(), mod)) return true; + .aggregate => |aggregate| switch (aggregate.storage) { + .elems => |elems| for (elems) |elem| { + if (try anyUndef(elem.toValue(), mod)) return true; + }, + .repeated_elem => |elem| if (try anyUndef(elem.toValue(), mod)) return true, }, else => {}, }, From f8b6eb63d53bba303a200a5c2493c5d5bddd1f66 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 19 May 2023 22:24:29 -0400 Subject: [PATCH 074/205] Sema: add coerceTupleToStruct result to the InternPool --- src/Sema.zig | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index bf4d13824a34..22ae6fb53154 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -28986,7 +28986,7 @@ fn coerceTupleToStruct( } const fields = struct_ty.structFields(mod); - const field_vals = try sema.arena.alloc(Value, fields.count()); + const field_vals = try sema.arena.alloc(InternPool.Index, fields.count()); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); @@ -29017,7 +29017,8 @@ fn coerceTupleToStruct( } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - field_vals[field_index] = field_val; + assert(field_val.ip_index != .none); + field_vals[field_index] = field_val.ip_index; } else { runtime_src = field_src; } @@ -29045,7 +29046,8 @@ fn coerceTupleToStruct( continue; } if (runtime_src == null) { - field_vals[i] = field.default_val; + assert(field.default_val.ip_index != .none); + field_vals[i] = field.default_val.ip_index; } else { field_ref.* = try sema.addConstant(field.ty, field.default_val); } @@ -29062,10 +29064,14 @@ fn coerceTupleToStruct( return block.addAggregateInit(struct_ty, field_refs); } - return sema.addConstant( - struct_ty, - try Value.Tag.aggregate.create(sema.arena, field_vals), - ); + assert(struct_ty.ip_index != .none); + const struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.ip_index, + .storage = .{ .elems = field_vals }, + } }); + errdefer 
mod.intern_pool.remove(struct_val); + + return sema.addConstant(struct_ty, struct_val.toValue()); } fn coerceTupleToTuple( From c4735941148eb32eee16307ba13876ae21606fba Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 19 May 2023 23:16:36 -0400 Subject: [PATCH 075/205] Sema: port reify struct access to use InternPool --- src/Sema.zig | 204 +++++++++++++++++++++++++++------------------------ src/type.zig | 9 ++- 2 files changed, 117 insertions(+), 96 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 22ae6fb53154..ebef3e929b82 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -18805,6 +18805,7 @@ fn zirReify( const target = mod.getTarget(); const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?; if (try union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src); + const ip = &mod.intern_pool; switch (@intToEnum(std.builtin.TypeId, tag_index)) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, @@ -18817,10 +18818,12 @@ fn zirReify( .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const signedness_val = struct_val[0]; - const bits_val = struct_val[1]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const signedness_index = fields.getIndex("signedness").?; + const bits_index = fields.getIndex("bits").?; + + const signedness_val = try union_val.val.fieldValue(fields.values()[signedness_index].ty, mod, signedness_index); + const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -18828,10 +18831,12 @@ fn zirReify( return sema.addType(ty); }, .Vector => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const len_val = struct_val[0]; - const child_val = struct_val[1]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const len_index = fields.getIndex("len").?; + const child_index = fields.getIndex("child").?; + + const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index); + const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); @@ -18845,10 +18850,10 @@ fn zirReify( return sema.addType(ty); }, .Float => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // bits: comptime_int, - const bits_val = struct_val[0]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const bits_index = fields.getIndex("bits").?; + + const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { @@ -18862,16 +18867,24 @@ fn zirReify( return sema.addType(ty); }, .Pointer => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const size_val = struct_val[0]; - const is_const_val = struct_val[1]; - const is_volatile_val = struct_val[2]; - const alignment_val = struct_val[3]; - const address_space_val = struct_val[4]; 
- const child_val = struct_val[5]; - const is_allowzero_val = struct_val[6]; - const sentinel_val = struct_val[7]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const size_index = fields.getIndex("size").?; + const is_const_index = fields.getIndex("is_const").?; + const is_volatile_index = fields.getIndex("is_volatile").?; + const alignment_index = fields.getIndex("alignment").?; + const address_space_index = fields.getIndex("address_space").?; + const child_index = fields.getIndex("child").?; + const is_allowzero_index = fields.getIndex("is_allowzero").?; + const sentinel_index = fields.getIndex("sentinel").?; + + const size_val = try union_val.val.fieldValue(fields.values()[size_index].ty, mod, size_index); + const is_const_val = try union_val.val.fieldValue(fields.values()[is_const_index].ty, mod, is_const_index); + const is_volatile_val = try union_val.val.fieldValue(fields.values()[is_volatile_index].ty, mod, is_volatile_index); + const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index); + const address_space_val = try union_val.val.fieldValue(fields.values()[address_space_index].ty, mod, address_space_index); + const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); + const is_allowzero_val = try union_val.val.fieldValue(fields.values()[is_allowzero_index].ty, mod, is_allowzero_index); + const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -18954,14 +18967,14 @@ fn zirReify( return sema.addType(ty); }, .Array => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // len: comptime_int, - const len_val = struct_val[0]; - // child: type, - const child_val = struct_val[1]; - // sentinel: ?*const anyopaque, - const sentinel_val = struct_val[2]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const len_index = fields.getIndex("len").?; + const child_index = fields.getIndex("child").?; + const sentinel_index = fields.getIndex("sentinel").?; + + const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index); + const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); + const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index); const len = len_val.toUnsignedInt(mod); const child_ty = child_val.toType(); @@ -18977,10 +18990,10 @@ fn zirReify( return sema.addType(ty); }, .Optional => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // child: type, - const child_val = struct_val[0]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const child_index = fields.getIndex("child").?; + + const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); const child_ty = child_val.toType(); @@ -18988,12 +19001,12 @@ fn zirReify( return sema.addType(ty); }, .ErrorUnion => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // error_set: type, - const error_set_val = struct_val[0]; - // payload: type, - const payload_val = struct_val[1]; + const fields = 
ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const error_set_index = fields.getIndex("error_set").?; + const payload_index = fields.getIndex("payload").?; + + const error_set_val = try union_val.val.fieldValue(fields.values()[error_set_index].ty, mod, error_set_index); + const payload_val = try union_val.val.fieldValue(fields.values()[payload_index].ty, mod, payload_index); const error_set_ty = error_set_val.toType(); const payload_ty = payload_val.toType(); @@ -19031,19 +19044,18 @@ fn zirReify( return sema.addType(ty); }, .Struct => { - // TODO use reflection instead of magic numbers here - const struct_val = union_val.val.castTag(.aggregate).?.data; - // layout: containerlayout, - const layout_val = struct_val[0]; - // backing_int: ?type, - const backing_int_val = struct_val[1]; - // fields: []const enumfield, - const fields_val = struct_val[2]; - // decls: []const declaration, - const decls_val = struct_val[3]; - // is_tuple: bool, - const is_tuple_val = struct_val[4]; - assert(struct_val.len == 5); + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const layout_index = fields.getIndex("layout").?; + const backing_integer_index = fields.getIndex("backing_integer").?; + const fields_index = fields.getIndex("fields").?; + const decls_index = fields.getIndex("decls").?; + const is_tuple_index = fields.getIndex("is_tuple").?; + + const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index); + const backing_integer_val = try union_val.val.fieldValue(fields.values()[backing_integer_index].ty, mod, backing_integer_index); + const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); + const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); + const is_tuple_val = try union_val.val.fieldValue(fields.values()[is_tuple_index].ty, mod, is_tuple_index); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -19052,23 +19064,23 @@ fn zirReify( return sema.fail(block, src, "reified structs must have no decls", .{}); } - if (layout != .Packed and !backing_int_val.isNull(mod)) { + if (layout != .Packed and !backing_integer_val.isNull(mod)) { return sema.fail(block, src, "non-packed struct does not support backing integer type", .{}); } - return try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy, is_tuple_val.toBool(mod)); + return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool(mod)); }, .Enum => { - const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // tag_type: type, - const tag_type_val = struct_val[0]; - // fields: []const EnumField, - const fields_val = struct_val[1]; - // decls: []const Declaration, - const decls_val = struct_val[2]; - // is_exhaustive: bool, - const is_exhaustive_val = struct_val[3]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const tag_type_index = fields.getIndex("tag_type").?; + const fields_index = fields.getIndex("fields").?; + const decls_index = fields.getIndex("decls").?; + const is_exhaustive_index = fields.getIndex("is_exhaustive").?; + + const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index); + const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); + const 
decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); + const is_exhaustive_val = try union_val.val.fieldValue(fields.values()[is_exhaustive_index].ty, mod, is_exhaustive_index); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19160,9 +19172,10 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Opaque => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // decls: []const Declaration, - const decls_val = struct_val[0]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const decls_index = fields.getIndex("decls").?; + + const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19205,16 +19218,16 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Union => { - // TODO use reflection instead of magic numbers here - const struct_val = union_val.val.castTag(.aggregate).?.data; - // layout: containerlayout, - const layout_val = struct_val[0]; - // tag_type: ?type, - const tag_type_val = struct_val[1]; - // fields: []const enumfield, - const fields_val = struct_val[2]; - // decls: []const declaration, - const decls_val = struct_val[3]; + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const layout_index = fields.getIndex("layout").?; + const tag_type_index = fields.getIndex("tag_type").?; + const fields_index = fields.getIndex("fields").?; + const decls_index = fields.getIndex("decls").?; + + const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index); + const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index); + const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); + const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19411,25 +19424,28 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Fn => { - const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // calling_convention: CallingConvention, - const cc = mod.toEnum(std.builtin.CallingConvention, struct_val[0]); - // alignment: comptime_int, - const alignment_val = struct_val[1]; - // is_generic: bool, - const is_generic = struct_val[2].toBool(mod); - // is_var_args: bool, - const is_var_args = struct_val[3].toBool(mod); - // return_type: ?type, - const return_type_val = struct_val[4]; - // args: []const Param, - const args_val = struct_val[5]; - + const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const calling_convention_index = fields.getIndex("calling_convention").?; + const alignment_index = fields.getIndex("alignment").?; + const is_generic_index = fields.getIndex("is_generic").?; + const is_var_args_index = fields.getIndex("is_var_args").?; + const return_type_index = fields.getIndex("return_type").?; + const params_index = fields.getIndex("params").?; + + const calling_convention_val = try union_val.val.fieldValue(fields.values()[calling_convention_index].ty, mod, calling_convention_index); + const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index); + const is_generic_val = try union_val.val.fieldValue(fields.values()[is_generic_index].ty, mod, is_generic_index); + const 
is_var_args_val = try union_val.val.fieldValue(fields.values()[is_var_args_index].ty, mod, is_var_args_index); + const return_type_val = try union_val.val.fieldValue(fields.values()[return_type_index].ty, mod, return_type_index); + const params_val = try union_val.val.fieldValue(fields.values()[params_index].ty, mod, params_index); + + const is_generic = is_generic_val.toBool(mod); if (is_generic) { return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{}); } + const is_var_args = is_var_args_val.toBool(mod); + const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val); if (is_var_args and cc != .C) { return sema.fail(block, src, "varargs functions must have C calling convention", .{}); } @@ -19448,7 +19464,7 @@ fn zirReify( const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); - const args_slice_val = args_val.castTag(.slice).?.data; + const args_slice_val = params_val.castTag(.slice).?.data; const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); const param_types = try sema.arena.alloc(InternPool.Index, args_len); diff --git a/src/type.zig b/src/type.zig index 019557c5e0a5..d0cf05484582 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3148,8 +3148,13 @@ pub const Type = struct { pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { const ip = &mod.intern_pool; const enum_type = ip.indexToKey(ty.ip_index).enum_type; - assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty); - return enum_type.tagValueIndex(ip, enum_tag.ip_index); + const int_tag = switch (ip.indexToKey(enum_tag.ip_index)) { + .int => enum_tag.ip_index, + .enum_tag => |info| info.int, + else => unreachable, + }; + assert(ip.typeOf(int_tag) == enum_type.tag_ty); + return enum_type.tagValueIndex(ip, int_tag); } pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields { From be78a12d7d5ac0a711fdf7237d7ccefba42be83c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 20 May 2023 00:05:29 -0400 Subject: [PATCH 076/205] Sema: port Value.decl_ptr to InternPool --- src/InternPool.zig | 47 +++++++++++++++++++++++++++++++++------------- src/Sema.zig | 37 ++++++++++++++++-------------------- 2 files changed, 50 insertions(+), 34 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index a16ebe64872f..04ec10fe72a0 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -575,9 +575,10 @@ pub const Key = union(enum) { std.hash.autoHash(hasher, ptr.ty); // Int-to-ptr pointers are hashed separately than decl-referencing pointers. // This is sound due to pointer provenance rules. 
+ std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr)); switch (ptr.addr) { + .decl => |decl| std.hash.autoHash(hasher, decl), .int => |int| std.hash.autoHash(hasher, int), - .decl => @panic("TODO"), } }, @@ -690,19 +691,14 @@ pub const Key = union(enum) { .ptr => |a_info| { const b_info = b.ptr; + if (a_info.ty != b_info.ty) return false; - if (a_info.ty != b_info.ty) - return false; + const AddrTag = @typeInfo(Key.Ptr.Addr).Union.tag_type.?; + if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; return switch (a_info.addr) { - .int => |a_int| switch (b_info.addr) { - .int => |b_int| a_int == b_int, - .decl => false, - }, - .decl => |a_decl| switch (b_info.addr) { - .int => false, - .decl => |b_decl| a_decl == b_decl, - }, + .decl => |a_decl| a_decl == b_info.addr.decl, + .int => |a_int| a_int == b_info.addr.int, }; }, @@ -1334,7 +1330,10 @@ pub const Tag = enum(u8) { /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// A pointer to an integer value. + /// A pointer to a decl. + /// data is extra index of PtrDecl, which contains the type and address. + ptr_decl, + /// A pointer with an integer value. /// data is extra index of PtrInt, which contains the type and address. /// Only pointer types are allowed to have this encoding. Optional types must use /// `opt_payload` or `opt_null`. @@ -1673,6 +1672,11 @@ pub const PackedU64 = packed struct(u64) { } }; +pub const PtrDecl = struct { + ty: Index, + decl: Module.Decl.Index, +}; + pub const PtrInt = struct { ty: Index, addr: Index, @@ -1990,6 +1994,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .val = payload_val, } }; }, + .ptr_decl => { + const info = ip.extraData(PtrDecl, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .decl = info.decl }, + } }; + }, .ptr_int => { const info = ip.extraData(PtrInt, data); return .{ .ptr = .{ @@ -2462,7 +2473,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .extern_func => @panic("TODO"), .ptr => |ptr| switch (ptr.addr) { - .decl => @panic("TODO"), + .decl => |decl| { + assert(ptr.ty != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_decl, + .data = try ip.addExtra(gpa, PtrDecl{ + .ty = ptr.ty, + .decl = decl, + }), + }); + }, .int => |int| { assert(ptr.ty != .none); ip.items.appendAssumeCapacity(.{ @@ -3465,6 +3485,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .undef => 0, .simple_type => 0, .simple_value => 0, + .ptr_decl => @sizeOf(PtrDecl), .ptr_int => @sizeOf(PtrInt), .opt_null => 0, .opt_payload => 0, diff --git a/src/Sema.zig b/src/Sema.zig index ebef3e929b82..145f514ebca9 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -29294,33 +29294,28 @@ fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref /// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps /// this function with `analyze_fn_body` set to true. 
 fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: bool) CompileError!Air.Inst.Ref {
-    try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index);
+    const mod = sema.mod;
+    try mod.declareDeclDependency(sema.owner_decl_index, decl_index);
     try sema.ensureDeclAnalyzed(decl_index);
 
-    const decl = sema.mod.declPtr(decl_index);
+    const decl = mod.declPtr(decl_index);
     const decl_tv = try decl.typedValue();
-    if (decl_tv.val.castTag(.variable)) |payload| {
-        const variable = payload.data;
-        const ty = try Type.ptr(sema.arena, sema.mod, .{
-            .pointee_type = decl_tv.ty,
-            .mutable = variable.is_mutable,
-            .@"addrspace" = decl.@"addrspace",
-            .@"align" = decl.@"align",
-        });
-        return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl_index));
-    }
+    const ptr_ty = try mod.ptrType(.{
+        .elem_type = decl_tv.ty.ip_index,
+        .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"),
+        .is_const = if (decl_tv.val.castTag(.variable)) |payload|
+            !payload.data.is_mutable
+        else
+            false,
+        .address_space = decl.@"addrspace",
+    });
     if (analyze_fn_body) {
         try sema.maybeQueueFuncBodyAnalysis(decl_index);
     }
-    return sema.addConstant(
-        try Type.ptr(sema.arena, sema.mod, .{
-            .pointee_type = decl_tv.ty,
-            .mutable = false,
-            .@"addrspace" = decl.@"addrspace",
-            .@"align" = decl.@"align",
-        }),
-        try Value.Tag.decl_ref.create(sema.arena, decl_index),
-    );
+    return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{
+        .ty = ptr_ty.ip_index,
+        .addr = .{ .decl = decl_index },
+    } })).toValue());
 }
 
 fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void {

From 115c08956278b79c848e04c2f4eefca40e6cd8a3 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 20 May 2023 09:35:11 -0400
Subject: [PATCH 077/205] Value: add `intern` and `unintern` to facilitate
 code conversion

This allows some code (like struct initializers) to use interned types
while other code (such as comptime mutation) continues to use legacy
types.

With these changes, a `zig build-obj empty.zig` gets to a crash on
missing interned error union types.
---
 src/InternPool.zig   | 222 ++++++++++++++++++++++++---
 src/Module.zig       |   2 +-
 src/Sema.zig         |  93 +++++++++---
 src/codegen/c.zig    |   2 +-
 src/codegen/llvm.zig | 353 ++++++++++++++++++++++++++++---------------
 src/value.zig        | 108 +++++++++++--
 6 files changed, 603 insertions(+), 177 deletions(-)

diff --git a/src/InternPool.zig b/src/InternPool.zig
index 04ec10fe72a0..8b826458a846 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -100,6 +100,16 @@ pub const MapIndex = enum(u32) {
     }
 };
 
+pub const RuntimeIndex = enum(u32) {
+    zero = 0,
+    comptime_field_ptr = std.math.maxInt(u32),
+    _,
+
+    pub fn increment(ri: *RuntimeIndex) void {
+        ri.* = @intToEnum(RuntimeIndex, @enumToInt(ri.*) + 1);
+    }
+};
+
 /// An index into `string_bytes`.
 pub const NullTerminatedString = enum(u32) {
     _,
@@ -478,11 +488,27 @@ pub const Key = union(enum) {
     };
 
     pub const Ptr = struct {
+        /// This is the pointer type, not the element type.
         ty: Index,
+        /// The value of the address that the pointer points to.
         addr: Addr,
+        /// This could be `none` if size is not a slice.
+ len: Index = .none, pub const Addr = union(enum) { + @"var": struct { + init: Index, + owner_decl: Module.Decl.Index, + lib_name: OptionalNullTerminatedString, + is_const: bool, + is_threadlocal: bool, + is_weak_linkage: bool, + }, decl: Module.Decl.Index, + mut_decl: struct { + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, + }, int: Index, }; }; @@ -577,7 +603,9 @@ pub const Key = union(enum) { // This is sound due to pointer provenance rules. std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr)); switch (ptr.addr) { + .@"var" => |@"var"| std.hash.autoHash(hasher, @"var".owner_decl), .decl => |decl| std.hash.autoHash(hasher, decl), + .mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl), .int => |int| std.hash.autoHash(hasher, int), } }, @@ -697,7 +725,9 @@ pub const Key = union(enum) { if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; return switch (a_info.addr) { + .@"var" => |a_var| a_var.owner_decl == b_info.addr.@"var".owner_decl, .decl => |a_decl| a_decl == b_info.addr.decl, + .mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl), .int => |a_int| a_int == b_info.addr.int, }; }, @@ -1330,6 +1360,12 @@ pub const Tag = enum(u8) { /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, + /// A pointer to a var. + /// data is extra index of PtrVal, which contains the type and address. + ptr_var, + /// A pointer to a decl that can be mutated at comptime. + /// data is extra index of PtrMutDecl, which contains the type and address. + ptr_mut_decl, /// A pointer to a decl. /// data is extra index of PtrDecl, which contains the type and address. ptr_decl, @@ -1338,6 +1374,11 @@ pub const Tag = enum(u8) { /// Only pointer types are allowed to have this encoding. Optional types must use /// `opt_payload` or `opt_null`. ptr_int, + /// A slice. + /// data is extra index of PtrSlice, which contains the ptr and len values + /// In order to use this encoding, one must ensure that the `InternPool` + /// already contains the slice type corresponding to this payload. + ptr_slice, /// An optional value that is non-null. /// data is Index of the payload value. /// In order to use this encoding, one must ensure that the `InternPool` @@ -1672,16 +1713,45 @@ pub const PackedU64 = packed struct(u64) { } }; +pub const PtrVar = struct { + ty: Index, + /// If flags.is_extern == true this is `none`. + init: Index, + owner_decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" var stderrp = ...` would have 'c' as library name. 
+ lib_name: OptionalNullTerminatedString, + flags: Flags, + + pub const Flags = packed struct(u32) { + is_const: bool, + is_threadlocal: bool, + is_weak_linkage: bool, + unused: u29 = undefined, + }; +}; + pub const PtrDecl = struct { ty: Index, decl: Module.Decl.Index, }; +pub const PtrMutDecl = struct { + ty: Index, + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, +}; + pub const PtrInt = struct { ty: Index, addr: Index, }; +pub const PtrSlice = struct { + ptr: Index, + len: Index, +}; + /// Trailing: Limb for every limbs_len pub const Int = struct { ty: Index, @@ -1994,6 +2064,30 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .val = payload_val, } }; }, + .ptr_var => { + const info = ip.extraData(PtrVar, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .@"var" = .{ + .init = info.init, + .owner_decl = info.owner_decl, + .lib_name = info.lib_name, + .is_const = info.flags.is_const, + .is_threadlocal = info.flags.is_threadlocal, + .is_weak_linkage = info.flags.is_weak_linkage, + } }, + } }; + }, + .ptr_mut_decl => { + const info = ip.extraData(PtrMutDecl, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .mut_decl = .{ + .decl = info.decl, + .runtime_index = info.runtime_index, + } }, + } }; + }, .ptr_decl => { const info = ip.extraData(PtrDecl, data); return .{ .ptr = .{ @@ -2008,6 +2102,18 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .addr = .{ .int = info.addr }, } }; }, + .ptr_slice => { + const info = ip.extraData(PtrSlice, data); + const ptr = ip.indexToKey(info.ptr).ptr; + var ptr_ty = ip.indexToKey(ptr.ty); + assert(ptr_ty.ptr_type.size == .Many); + ptr_ty.ptr_type.size = .Slice; + return .{ .ptr = .{ + .ty = ip.getAssumeExists(ptr_ty), + .addr = ptr.addr, + .len = info.len, + } }; + }, .int_u8 => .{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = data }, @@ -2472,31 +2578,67 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .extern_func => @panic("TODO"), - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| { - assert(ptr.ty != .none); - ip.items.appendAssumeCapacity(.{ - .tag = .ptr_decl, - .data = try ip.addExtra(gpa, PtrDecl{ - .ty = ptr.ty, - .decl = decl, + .ptr => |ptr| switch (ip.items.items(.tag)[@enumToInt(ptr.ty)]) { + .type_pointer => { + assert(ptr.len == .none); + switch (ptr.addr) { + .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_var, + .data = try ip.addExtra(gpa, PtrVar{ + .ty = ptr.ty, + .init = @"var".init, + .owner_decl = @"var".owner_decl, + .lib_name = @"var".lib_name, + .flags = .{ + .is_const = @"var".is_const, + .is_threadlocal = @"var".is_threadlocal, + .is_weak_linkage = @"var".is_weak_linkage, + }, + }), }), - }); + .decl => |decl| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_decl, + .data = try ip.addExtra(gpa, PtrDecl{ + .ty = ptr.ty, + .decl = decl, + }), + }), + .mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_mut_decl, + .data = try ip.addExtra(gpa, PtrMutDecl{ + .ty = ptr.ty, + .decl = mut_decl.decl, + .runtime_index = mut_decl.runtime_index, + }), + }), + .int => |int| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_int, + .data = try ip.addExtra(gpa, PtrInt{ + .ty = ptr.ty, + .addr = int, + }), + }), + } }, - .int => |int| { - assert(ptr.ty != .none); + .type_slice => { + assert(ptr.len != .none); + var new_key = key; + new_key.ptr.ty = @intToEnum(Index, ip.items.items(.data)[@enumToInt(ptr.ty)]); + new_key.ptr.len = .none; + const ptr_index = try get(ip, gpa, new_key); + try ip.items.ensureUnusedCapacity(gpa, 1); 
ip.items.appendAssumeCapacity(.{ - .tag = .ptr_int, - .data = try ip.addExtra(gpa, PtrInt{ - .ty = ptr.ty, - .addr = int, + .tag = .ptr_slice, + .data = try ip.addExtra(gpa, PtrSlice{ + .ptr = ptr_index, + .len = ptr.len, }), }); }, + else => unreachable, }, .opt => |opt| { - assert(opt.ty != .none); assert(ip.isOptionalType(opt.ty)); ip.items.appendAssumeCapacity(if (opt.val == .none) .{ .tag = .opt_null, @@ -3087,11 +3229,15 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)), MapIndex => @enumToInt(@field(extra, field.name)), OptionalMapIndex => @enumToInt(@field(extra, field.name)), + RuntimeIndex => @enumToInt(@field(extra, field.name)), + NullTerminatedString => @enumToInt(@field(extra, field.name)), + OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), + PtrVar.Flags => @bitCast(u32, @field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), }); } @@ -3149,11 +3295,15 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32), MapIndex => @intToEnum(MapIndex, int32), OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), + RuntimeIndex => @intToEnum(RuntimeIndex, int32), + NullTerminatedString => @intToEnum(NullTerminatedString, int32), + OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), + PtrVar.Flags => @bitCast(PtrVar.Flags, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), }; } @@ -3274,7 +3424,7 @@ pub fn childType(ip: InternPool, i: Index) Index { }; } -/// Given a slice type, returns the type of the pointer field. +/// Given a slice type, returns the type of the ptr field. pub fn slicePtrType(ip: InternPool, i: Index) Index { switch (i) { .const_slice_u8_type => return .manyptr_const_u8_type, @@ -3288,10 +3438,29 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index { } } +/// Given a slice value, returns the value of the ptr field. +pub fn slicePtr(ip: InternPool, i: Index) Index { + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, + else => unreachable, // not a slice value + } +} + +/// Given a slice value, returns the value of the len field. +pub fn sliceLen(ip: InternPool, i: Index) Index { + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .ptr_slice => return ip.extraData(PtrSlice, item.data).len, + else => unreachable, // not a slice value + } +} + /// Given an existing value, returns the same value but with the supplied type. 
/// Only some combinations are allowed: /// * int <=> int /// * int <=> enum +/// * ptr <=> ptr pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { switch (ip.indexToKey(val)) { .int => |int| switch (ip.indexToKey(new_ty)) { @@ -3305,6 +3474,13 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al // Assume new_ty is an integer type. return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty); }, + .ptr => |ptr| switch (ip.indexToKey(new_ty)) { + .ptr_type => return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = ptr.addr, + } }), + else => unreachable, + }, else => unreachable, } } @@ -3380,6 +3556,15 @@ pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.Inferre return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional(); } +pub fn isPointerType(ip: InternPool, ty: Index) bool { + const tags = ip.items.items(.tag); + if (ty == .none) return false; + return switch (tags[@enumToInt(ty)]) { + .type_pointer, .type_slice => true, + else => false, + }; +} + pub fn isOptionalType(ip: InternPool, ty: Index) bool { const tags = ip.items.items(.tag); if (ty == .none) return false; @@ -3485,8 +3670,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .undef => 0, .simple_type => 0, .simple_value => 0, + .ptr_var => @sizeOf(PtrVar), .ptr_decl => @sizeOf(PtrDecl), + .ptr_mut_decl => @sizeOf(PtrMutDecl), .ptr_int => @sizeOf(PtrInt), + .ptr_slice => @sizeOf(PtrSlice), .opt_null => 0, .opt_payload => 0, .int_u8 => 0, diff --git a/src/Module.zig b/src/Module.zig index 70b08ea3a98f..dc9b9402bd9b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5762,7 +5762,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // Crucially, this happens *after* we set the function state to success above, // so that dependencies on the function body will now be satisfied rather than // result in circular dependency errors. - sema.resolveFnTypes(fn_ty_info) catch |err| switch (err) { + sema.resolveFnTypes(mod.typeToFunc(fn_ty).?) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, diff --git a/src/Sema.zig b/src/Sema.zig index 145f514ebca9..c98c61ddc4c9 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -13072,25 +13072,32 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // as zero-filling a byte array. 
if (lhs_len == 1) { const elem_val = try lhs_sub_val.elemValue(mod, 0); - break :v try Value.Tag.repeated.create(sema.arena, elem_val); + break :v try mod.intern(.{ .aggregate = .{ + .ty = result_ty.ip_index, + .storage = .{ .repeated_elem = elem_val.ip_index }, + } }); } - const element_vals = try sema.arena.alloc(Value, final_len_including_sent); + const element_vals = try sema.arena.alloc(InternPool.Index, final_len_including_sent); var elem_i: usize = 0; while (elem_i < result_len) { var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); - element_vals[elem_i] = elem_val; + assert(elem_val.ip_index != .none); + element_vals[elem_i] = elem_val.ip_index; elem_i += 1; } } if (lhs_info.sentinel) |sent_val| { - element_vals[result_len] = sent_val; + element_vals[result_len] = sent_val.ip_index; } - break :v try Value.Tag.aggregate.create(sema.arena, element_vals); + break :v try mod.intern(.{ .aggregate = .{ + .ty = result_ty.ip_index, + .storage = .{ .elems = element_vals }, + } }); }; - return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null); + return sema.addConstantMaybeRef(block, result_ty, val.toValue(), ptr_addrspace != null); } try sema.requireRuntimeBlock(block, src, lhs_src); @@ -18111,12 +18118,16 @@ fn finishStructInit( } else null; const runtime_index = opt_runtime_index orelse { - const values = try sema.arena.alloc(Value, field_inits.len); - for (field_inits, 0..) |field_init, i| { - values[i] = (sema.resolveMaybeUndefVal(field_init) catch unreachable).?; - } - const struct_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, struct_ty, struct_val, is_ref); + const elems = try sema.arena.alloc(InternPool.Index, field_inits.len); + for (elems, field_inits, 0..) |*elem, field_init, field_i| { + elem.* = try (sema.resolveMaybeUndefVal(field_init) catch unreachable).? 
+ .intern(struct_ty.structFieldType(field_i, mod), mod); + } + const struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.ip_index, + .storage = .{ .elems = elems }, + } }); + return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref); }; if (is_ref) { @@ -18195,21 +18206,20 @@ fn zirStructInitAnon( const init = try sema.resolveInst(item.data.init); field_ty.* = sema.typeOf(init).ip_index; - if (types[i].toType().zigTypeTag(mod) == .Opaque) { + if (field_ty.toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i].toType()); + try sema.addDeclaredHereNote(msg, field_ty.toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(init)) |init_val| { - assert(init_val.ip_index != .none); - values[i] = init_val.ip_index; + values[i] = try init_val.intern(field_ty.toType(), mod); } else { values[i] = .none; runtime_index = i; @@ -24891,9 +24901,7 @@ fn structFieldVal( if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { return sema.addConstant(field.ty, opv); } - - const field_values = struct_val.castTag(.aggregate).?.data; - return sema.addConstant(field.ty, field_values[field_index]); + return sema.addConstant(field.ty, try struct_val.fieldValue(field.ty, mod, field_index)); } try sema.requireRuntimeBlock(block, src, null); @@ -27925,7 +27933,24 @@ fn beginComptimePtrMutation( ptr_elem_ty, parent.decl_ref_mut, ), + .repeated => { + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(elems, val_ptr.castTag(.repeated).?.data); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &elems[field_index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, .@"union" => { // We need to set the active field of the union. 
const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); @@ -28107,6 +28132,13 @@ fn beginComptimePtrMutationInner( const mod = sema.mod; const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; + + const decl = mod.declPtr(decl_ref_mut.decl_index); + var decl_arena: std.heap.ArenaAllocator = undefined; + const allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); + defer decl.value_arena.?.release(&decl_arena); + decl_val.* = try decl_val.unintern(allocator, mod); + if (coerce_ok) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, @@ -28412,6 +28444,27 @@ fn beginComptimePtrLoad( }, else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { .int => return error.RuntimeLoad, + .ptr => |ptr| switch (ptr.addr) { + .@"var", .int => return error.RuntimeLoad, + .decl, .mut_decl => blk: { + const decl_index = switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }; + const decl = mod.declPtr(decl_index); + const decl_tv = try decl.typedValue(); + if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; + + const layout_defined = decl.ty.hasWellDefinedLayout(mod); + break :blk ComptimePtrLoadKit{ + .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, + .pointee = decl_tv, + .is_mutable = false, + .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, + }; + }, + }, else => unreachable, }, }; @@ -29425,7 +29478,7 @@ fn analyzeSlicePtr( const result_ty = slice_ty.slicePtrFieldType(mod); if (try sema.resolveMaybeUndefVal(slice)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - return sema.addConstant(result_ty, val.slicePtr()); + return sema.addConstant(result_ty, val.slicePtr(mod)); } try sema.requireRuntimeBlock(block, slice_src, null); return block.addTyOp(.slice_ptr, result_ty, slice); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c9cc48590381..a98b77c96432 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -566,7 +566,7 @@ pub const DeclGen = struct { try writer.writeAll("){ .ptr = "); } - try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(), .Initializer); + try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(mod), .Initializer); const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 1da3d91b13d1..2f4f6589b5f1 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3363,125 +3363,223 @@ pub const DeclGen = struct { }, else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { .int => |int| return lowerIntAsPtr(dg, int), + .ptr => |ptr| { + const ptr_val = switch (ptr.addr) { + .@"var" => |@"var"| ptr: { + const decl = dg.module.declPtr(@"var".owner_decl); + dg.module.markDeclAlive(decl); + + const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); + const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); + + const val = try dg.resolveGlobalDecl(@"var".owner_decl); + const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) + val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) + else + val; + break :ptr addrspace_casted_ptr; + }, + .decl => |decl| try lowerDeclRefValue(dg, tv, decl), + .mut_decl => |mut_decl| try lowerDeclRefValue(dg, tv, mut_decl.decl), + .int => |int| lowerIntAsPtr(dg, mod.intern_pool.indexToKey(int).int), + 
}; + switch (ptr.len) { + .none => return ptr_val, + else => { + const fields: [2]*llvm.Value = .{ + ptr_val, + try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + } + }, else => unreachable, }, }, - .Array => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ); - }, - .str_lit => { - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - if (byte == 0 and bytes.len > 0) { + .Array => switch (tv.val.ip_index) { + .none => switch (tv.val.tag()) { + .bytes => { + const bytes = tv.val.castTag(.bytes).?.data; + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + .True, // Don't null terminate. Bytes has the sentinel, if any. + ); + }, + .str_lit => { + const str_lit = tv.val.castTag(.str_lit).?.data; + const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + if (tv.ty.sentinel(mod)) |sent_val| { + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); + if (byte == 0 and bytes.len > 0) { + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, bytes.len), + .False, // Yes, null terminate. + ); + } + var array = std.ArrayList(u8).init(dg.gpa); + defer array.deinit(); + try array.ensureUnusedCapacity(bytes.len + 1); + array.appendSliceAssumeCapacity(bytes); + array.appendAssumeCapacity(byte); + return dg.context.constString( + array.items.ptr, + @intCast(c_uint, array.items.len), + .True, // Don't null terminate. + ); + } else { return dg.context.constString( bytes.ptr, @intCast(c_uint, bytes.len), - .False, // Yes, null terminate. + .True, // Don't null terminate. `bytes` has the sentinel, if any. ); } - var array = std.ArrayList(u8).init(dg.gpa); - defer array.deinit(); - try array.ensureUnusedCapacity(bytes.len + 1); - array.appendSliceAssumeCapacity(bytes); - array.appendAssumeCapacity(byte); - return dg.context.constString( - array.items.ptr, - @intCast(c_uint, array.items.len), - .True, // Don't null terminate. - ); - } else { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .True, // Don't null terminate. `bytes` has the sentinel, if any. - ); - } - }, - .aggregate => { - const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.childType(mod); - const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod)); - const llvm_elems = try gpa.alloc(*llvm.Value, len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals[0..len], 0..) 
|elem_val, i| { - llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - .repeated => { - const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); - - var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); + }, + .aggregate => { + const elem_vals = tv.val.castTag(.aggregate).?.data; + const elem_ty = tv.ty.childType(mod); + const gpa = dg.gpa; + const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod)); + const llvm_elems = try gpa.alloc(*llvm.Value, len); + defer gpa.free(llvm_elems); + var need_unnamed = false; + for (elem_vals[0..len], 0..) |elem_val, i| { + llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .repeated => { + const val = tv.val.castTag(.repeated).?.data; + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); + const len_including_sent = len + @boolToInt(sentinel != null); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); + defer gpa.free(llvm_elems); - if (sentinel) |sent| { - llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); - } + var need_unnamed = false; + if (len != 0) { + for (llvm_elems[0..len]) |*elem| { + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); + } + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); + } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } + if (sentinel) |sent| { + llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + } + + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .empty_array_sentinel => { + const elem_ty = tv.ty.childType(mod); + const sent_val = tv.ty.sentinel(mod).?; + const sentinel = try 
dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); + const llvm_elems: [1]*llvm.Value = .{sentinel}; + const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); + if (need_unnamed) { + return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); + } + }, + else => unreachable, }, - .empty_array_sentinel => { - const elem_ty = tv.ty.childType(mod); - const sent_val = tv.ty.sentinel(mod).?; - const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); - const llvm_elems: [1]*llvm.Value = .{sentinel}; - const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); - if (need_unnamed) { - return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); - } + else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .aggregate => |aggregate| switch (aggregate.storage) { + .elems => |elem_vals| { + const elem_ty = tv.ty.childType(mod); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); + defer gpa.free(llvm_elems); + var need_unnamed = false; + for (elem_vals, 0..) |elem_val, i| { + llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); + } + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .repeated_elem => |val| { + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); + const len_including_sent = len + @boolToInt(sentinel != null); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); + defer gpa.free(llvm_elems); + + var need_unnamed = false; + if (len != 0) { + for (llvm_elems[0..len]) |*elem| { + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); + } + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); + } + + if (sentinel) |sent| { + llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + } + + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + }, + else => unreachable, }, - else => unreachable, }, .Optional => { comptime assert(optional_layout_version == 3); @@ -3494,15 +3592,22 @@ pub const DeclGen = struct { return non_null_bit; } const llvm_ty = try dg.lowerType(tv.ty); - if (tv.ty.optionalReprIsPayload(mod)) { - if (tv.val.castTag(.opt_payload)) |payload| { - return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }); - } else if (is_pl) { - return dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }); - } else { - return llvm_ty.constNull(); - } - } + if (tv.ty.optionalReprIsPayload(mod)) return switch (tv.val.ip_index) { + .none => if (tv.val.castTag(.opt_payload)) |payload| + try dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }) + else if (is_pl) + try 
dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }) + else + llvm_ty.constNull(), + .null_value => llvm_ty.constNull(), + else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .opt => |opt| switch (opt.val) { + .none => llvm_ty.constNull(), + else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), + }, + else => unreachable, + }, + }; assert(payload_ty.zigTypeTag(mod) != .Fn); const llvm_field_count = llvm_ty.countStructElementTypes(); @@ -3589,7 +3694,6 @@ pub const DeclGen = struct { }, .Struct => { const llvm_struct_ty = try dg.lowerType(tv.ty); - const field_vals = tv.val.castTag(.aggregate).?.data; const gpa = dg.gpa; const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { @@ -3623,7 +3727,7 @@ pub const DeclGen = struct { const field_llvm_val = try dg.lowerValue(.{ .ty = field_ty.toType(), - .val = field_vals[i], + .val = try tv.val.fieldValue(field_ty.toType(), mod, i), }); need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); @@ -3669,13 +3773,12 @@ pub const DeclGen = struct { comptime assert(Type.packed_struct_layout_version == 2); var running_int: *llvm.Value = int_llvm_ty.constNull(); var running_bits: u16 = 0; - for (field_vals, 0..) |field_val, i| { - const field = fields[i]; + for (fields, 0..) |field, i| { if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try dg.lowerValue(.{ .ty = field.ty, - .val = field_val, + .val = try tv.val.fieldValue(field.ty, mod, i), }); const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = dg.context.intType(ty_bit_size); @@ -3722,7 +3825,7 @@ pub const DeclGen = struct { const field_llvm_val = try dg.lowerValue(.{ .ty = field.ty, - .val = field_vals[field_and_index.index], + .val = try tv.val.fieldValue(field.ty, mod, field_and_index.index), }); need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); @@ -3756,7 +3859,13 @@ pub const DeclGen = struct { }, .Union => { const llvm_union_ty = try dg.lowerType(tv.ty); - const tag_and_val = tv.val.castTag(.@"union").?.data; + const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) { + .none => tv.val.castTag(.@"union").?.data, + else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() }, + else => unreachable, + }, + }; const layout = tv.ty.unionGetLayout(mod); diff --git a/src/value.zig b/src/value.zig index db79fa3fe689..ea45f0f90659 100644 --- a/src/value.zig +++ b/src/value.zig @@ -602,6 +602,73 @@ pub const Value = struct { return result; } + pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { + if (val.ip_index != .none) return val.ip_index; + switch (val.tag()) { + .slice => { + const pl = val.castTag(.slice).?.data; + const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod); + return mod.intern(.{ .ptr = .{ + .ty = ty.ip_index, + .addr = mod.intern_pool.indexToKey(ptr).ptr.addr, + .len = try pl.len.intern(Type.usize, mod), + } }); + }, + .opt_payload => return mod.intern(.{ .opt = .{ + .ty = ty.ip_index, + .val = try val.castTag(.opt_payload).?.data.intern(ty.childType(mod), mod), + } }), + .aggregate => { + const old_elems = val.castTag(.aggregate).?.data; + const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); + defer mod.gpa.free(new_elems); + const ty_key = mod.intern_pool.indexToKey(ty.ip_index); + for (new_elems, old_elems, 0..) 
|*new_elem, old_elem, field_i| + new_elem.* = try old_elem.intern(switch (ty_key) { + .struct_type => ty.structFieldType(field_i, mod), + .anon_struct_type => |info| info.types[field_i].toType(), + inline .array_type, .vector_type => |info| info.child.toType(), + else => unreachable, + }, mod); + return mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .elems = new_elems }, + } }); + }, + .repeated => return mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = try val.castTag(.repeated).?.data.intern( + ty.structFieldType(0, mod), + mod, + ) }, + } }), + .@"union" => { + const pl = val.castTag(.@"union").?.data; + return mod.intern(.{ .un = .{ + .ty = ty.ip_index, + .tag = try pl.tag.intern(ty.unionTagTypeHypothetical(mod), mod), + .val = try pl.val.intern(ty.unionFieldType(pl.tag, mod), mod), + } }); + }, + else => unreachable, + } + } + + pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value { + if (val.ip_index == .none) return val; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .aggregate => |aggregate| switch (aggregate.storage) { + .elems => |old_elems| { + const new_elems = try arena.alloc(Value, old_elems.len); + for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); + return Tag.aggregate.create(arena, new_elems); + }, + .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()), + }, + else => return val, + } + } + pub fn toIntern(val: Value) InternPool.Index { assert(val.ip_index != .none); return val.ip_index; @@ -2002,11 +2069,11 @@ pub const Value = struct { const ptr_ty = ty.slicePtrFieldType(mod); const a_ptr = switch (a_ty.ptrSize(mod)) { - .Slice => a.slicePtr(), + .Slice => a.slicePtr(mod), .One => a, else => unreachable, }; - return try eqlAdvanced(a_ptr, ptr_ty, b.slicePtr(), ptr_ty, mod, opt_sema); + return try eqlAdvanced(a_ptr, ptr_ty, b.slicePtr(mod), ptr_ty, mod, opt_sema); }, .Many, .C, .One => {}, }, @@ -2429,7 +2496,8 @@ pub const Value = struct { } } - pub fn slicePtr(val: Value) Value { + pub fn slicePtr(val: Value, mod: *Module) Value { + if (val.ip_index != .none) return mod.intern_pool.slicePtr(val.ip_index).toValue(); return switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr, // TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc. 
@@ -2439,6 +2507,7 @@ pub const Value = struct { } pub fn sliceLen(val: Value, mod: *Module) u64 { + if (val.ip_index != .none) return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); return switch (val.tag()) { .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod), .decl_ref => { @@ -2531,7 +2600,19 @@ pub const Value = struct { else => unreachable, }, - else => unreachable, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .@"var" => unreachable, + .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), + .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), + .int => unreachable, + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .elems => |elems| elems[index].toValue(), + .repeated_elem => |elem| elem.toValue(), + }, + else => unreachable, + }, } } @@ -2675,6 +2756,7 @@ pub const Value = struct { } pub fn unionTag(val: Value, mod: *Module) Value { + if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag; return switch (mod.intern_pool.indexToKey(val.ip_index)) { .undef, .enum_tag => val, .un => |un| un.tag.toValue(), @@ -2696,7 +2778,7 @@ pub const Value = struct { else => val, }; - if (ptr_val.tag() == .elem_ptr) { + if (ptr_val.ip_index == .none and ptr_val.tag() == .elem_ptr) { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; if (elem_ptr.elem_ty.eql(elem_ty, mod)) { return Tag.elem_ptr.create(arena, .{ @@ -4809,10 +4891,12 @@ pub const Value = struct { pub const base_tag = Tag.@"union"; base: Payload = .{ .tag = base_tag }, - data: struct { + data: Data, + + pub const Data = struct { tag: Value, val: Value, - }, + }; }; }; @@ -4844,15 +4928,7 @@ pub const Value = struct { return if (x) one else zero; } - pub const RuntimeIndex = enum(u32) { - zero = 0, - comptime_field_ptr = std.math.maxInt(u32), - _, - - pub fn increment(ri: *RuntimeIndex) void { - ri.* = @intToEnum(RuntimeIndex, @enumToInt(ri.*) + 1); - } - }; + pub const RuntimeIndex = InternPool.RuntimeIndex; /// This function is used in the debugger pretty formatters in tools/ to fetch the /// Tag to Payload mapping to facilitate fancy debug printing for this type. From dfb3521160eb4397b7482de68796d370230a8d11 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 20 May 2023 15:50:56 -0700 Subject: [PATCH 078/205] compiler: remove var_args_param_type from SimpleType This is now represented instead by a special `InternPool.Index.Tag` that has no corresponding type/value. 
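As a minimal sketch of the idea (a hypothetical, reduced stand-in for
`InternPool.Index`, not code from this change): interned items take
indices counting up from zero, while the special tags are pinned to
fixed values at the top of the u32 range, so the two can never collide.

    const std = @import("std");

    // Reduced stand-in for `InternPool.Index`, for illustration only.
    const Index = enum(u32) {
        // Interned items occupy indices counting up from zero.
        u8_type,
        bool_type,
        // Special tags with no corresponding type/value sit at the top of
        // the range, just below the `none` sentinel, out of reach of any
        // index the pool could ever allocate.
        var_args_param_type = std.math.maxInt(u32) - 1,
        none = std.math.maxInt(u32),
        _,
    };

    test "special tags do not collide with interned indices" {
        try std.testing.expect(@enumToInt(Index.u8_type) < @enumToInt(Index.var_args_param_type));
        try std.testing.expect(Index.var_args_param_type != Index.none);
    }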
--- src/Air.zig | 4 +++- src/InternPool.zig | 7 ++----- src/Sema.zig | 7 ++----- src/Zir.zig | 4 +++- src/type.zig | 10 ---------- 5 files changed, 10 insertions(+), 22 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 6673a37fb61e..9c1b4997631c 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -905,7 +905,6 @@ pub const Inst = struct { const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), - var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), undef = @enumToInt(InternPool.Index.undef), zero = @enumToInt(InternPool.Index.zero), @@ -926,6 +925,9 @@ pub const Inst = struct { empty_struct = @enumToInt(InternPool.Index.empty_struct), generic_poison = @enumToInt(InternPool.Index.generic_poison), + /// This Ref does not correspond to any AIR instruction or constant + /// value. It is used to handle argument types of var args functions. + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), /// This Ref does not correspond to any AIR instruction or constant /// value and may instead be used as a sentinel to indicate null. none = @enumToInt(InternPool.Index.none), diff --git a/src/InternPool.zig b/src/InternPool.zig index 8b826458a846..130bcc1cad5e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -959,7 +959,6 @@ pub const Index = enum(u32) { const_slice_u8_sentinel_0_type, anyerror_void_error_union_type, generic_poison_type, - var_args_param_type, /// `@TypeOf(.{})` empty_struct_type, @@ -1002,6 +1001,8 @@ pub const Index = enum(u32) { /// is not known until generic function instantiation. generic_poison, + /// Used by Air/Sema only. + var_args_param_type = std.math.maxInt(u32) - 1, none = std.math.maxInt(u32), _, @@ -1195,9 +1196,6 @@ pub const static_keys = [_]Key{ // generic_poison_type .{ .simple_type = .generic_poison }, - // var_args_param_type - .{ .simple_type = .var_args_param }, - // empty_struct_type .{ .anon_struct_type = .{ .types = &.{}, @@ -1570,7 +1568,6 @@ pub const SimpleType = enum(u32) { type_info, generic_poison, - var_args_param, }; pub const SimpleValue = enum(u32) { diff --git a/src/Sema.zig b/src/Sema.zig index c98c61ddc4c9..4f0bfd712072 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -31657,7 +31657,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .anyerror, .noreturn, .generic_poison, - .var_args_param, .atomic_order, .atomic_rmw_op, .calling_convention, @@ -31856,6 +31855,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { const mod = sema.mod; switch (ty.ip_index) { + .var_args_param_type => unreachable, + // TODO: After the InternPool transition is complete, change this to `unreachable`. 
.none => return ty, @@ -31909,7 +31910,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .const_slice_u8_sentinel_0_type, .anyerror_void_error_union_type, .generic_poison_type, - .var_args_param_type, .empty_struct_type, => return ty, @@ -33123,7 +33123,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .undefined => Value.undef, .generic_poison => return error.GenericPoison, - .var_args_param => unreachable, }, .struct_type => |struct_type| { const resolved_ty = try sema.resolveTypeFields(ty); @@ -33678,8 +33677,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, - - .var_args_param => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; diff --git a/src/Zir.zig b/src/Zir.zig index ec3288620c42..7af779c1aaca 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2112,7 +2112,6 @@ pub const Inst = struct { const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), - var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), undef = @enumToInt(InternPool.Index.undef), zero = @enumToInt(InternPool.Index.zero), @@ -2133,6 +2132,9 @@ pub const Inst = struct { empty_struct = @enumToInt(InternPool.Index.empty_struct), generic_poison = @enumToInt(InternPool.Index.generic_poison), + /// This tag is here to match Air and InternPool, however it is unused + /// for ZIR purposes. + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), /// This Ref does not correspond to any ZIR instruction or constant /// value and may instead be used as a sentinel to indicate null. 
none = @enumToInt(InternPool.Index.none), diff --git a/src/type.zig b/src/type.zig index d0cf05484582..d1ee185be5e8 100644 --- a/src/type.zig +++ b/src/type.zig @@ -105,7 +105,6 @@ pub const Type = struct { .type_info => .Union, .generic_poison => return error.GenericPoison, - .var_args_param => unreachable, }, // values, not types @@ -803,7 +802,6 @@ pub const Type = struct { => false, .generic_poison => unreachable, - .var_args_param => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { @@ -952,8 +950,6 @@ pub const Type = struct { .type_info, .generic_poison, => false, - - .var_args_param => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { @@ -1198,7 +1194,6 @@ pub const Type = struct { .noreturn => unreachable, .generic_poison => unreachable, - .var_args_param => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse @@ -1610,7 +1605,6 @@ pub const Type = struct { .type_info => unreachable, .noreturn => unreachable, .generic_poison => unreachable, - .var_args_param => unreachable, }, .struct_type => |struct_type| switch (ty.containerLayout(mod)) { .Packed => { @@ -1841,7 +1835,6 @@ pub const Type = struct { .undefined => unreachable, .enum_literal => unreachable, .generic_poison => unreachable, - .var_args_param => unreachable, .atomic_order => unreachable, // missing call to resolveTypeFields .atomic_rmw_op => unreachable, // missing call to resolveTypeFields @@ -2717,7 +2710,6 @@ pub const Type = struct { .undefined => return Value.undef, .generic_poison => unreachable, - .var_args_param => unreachable, }, .struct_type => |struct_type| { if (mod.structPtrUnwrap(struct_type.index)) |s| { @@ -2896,8 +2888,6 @@ pub const Type = struct { .enum_literal, .type_info, => true, - - .var_args_param => unreachable, }, .struct_type => |struct_type| { // A struct with no fields is not comptime-only. From 65d65f5dda144d76ea9bbd82b2b5aacb09d7ae34 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 20 May 2023 15:52:28 -0700 Subject: [PATCH 079/205] Module: remove tmp_hack_arena This was only needed when pointers were not fully migrated to InternPool yet. --- src/Compilation.zig | 1 - src/Module.zig | 5 ----- 2 files changed, 6 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 6291ce78d469..43b16241fc71 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1316,7 +1316,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .local_zir_cache = local_zir_cache, .emit_h = emit_h, .error_name_list = .{}, - .tmp_hack_arena = std.heap.ArenaAllocator.init(gpa), }; try module.init(); diff --git a/src/Module.zig b/src/Module.zig index dc9b9402bd9b..b718193abaa3 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -98,10 +98,6 @@ string_literal_bytes: ArrayListUnmanaged(u8) = .{}, /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, -/// Temporarily used for some unfortunate allocations made by backends that need to construct -/// pointer types that can't be represented by the InternPool. Once all types are migrated -/// to be stored in the InternPool, this can be removed. -tmp_hack_arena: std.heap.ArenaAllocator, /// The set of all the generic function instantiations. 
This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch @@ -3423,7 +3419,6 @@ pub fn deinit(mod: *Module) void { mod.string_literal_bytes.deinit(gpa); mod.intern_pool.deinit(gpa); - mod.tmp_hack_arena.deinit(); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { From 7e19c9566860e78ad536aaa678af8c32531fade9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 20 May 2023 17:55:40 -0700 Subject: [PATCH 080/205] Sema: move `inferred_alloc_const/mut_type` to InternPool Now, all types are migrated to use `InternPool`. The `Type.Tag` enum is deleted in this commit. --- src/Air.zig | 2 + src/InternPool.zig | 17 +- src/Module.zig | 2 +- src/Sema.zig | 96 ++-- src/Zir.zig | 2 + src/codegen/c.zig | 193 ++++---- src/codegen/llvm.zig | 57 ++- src/link/Dwarf.zig | 19 +- src/print_air.zig | 8 +- src/type.zig | 1056 ++++++++++++++++-------------------------- src/value.zig | 8 +- 11 files changed, 594 insertions(+), 866 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 9c1b4997631c..3314f2897e2e 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -905,6 +905,8 @@ pub const Inst = struct { const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), + inferred_alloc_mut_type = @enumToInt(InternPool.Index.inferred_alloc_mut_type), empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), undef = @enumToInt(InternPool.Index.undef), zero = @enumToInt(InternPool.Index.zero), diff --git a/src/InternPool.zig b/src/InternPool.zig index 130bcc1cad5e..b9b706e499ca 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -959,6 +959,8 @@ pub const Index = enum(u32) { const_slice_u8_sentinel_0_type, anyerror_void_error_union_type, generic_poison_type, + inferred_alloc_const_type, + inferred_alloc_mut_type, /// `@TypeOf(.{})` empty_struct_type, @@ -1009,10 +1011,7 @@ pub const Index = enum(u32) { pub fn toType(i: Index) @import("type.zig").Type { assert(i != .none); - return .{ - .ip_index = i, - .legacy = undefined, - }; + return .{ .ip_index = i }; } pub fn toValue(i: Index) @import("value.zig").Value { @@ -1195,6 +1194,10 @@ pub const static_keys = [_]Key{ // generic_poison_type .{ .simple_type = .generic_poison }, + // inferred_alloc_const_type + .{ .simple_type = .inferred_alloc_const }, + // inferred_alloc_mut_type + .{ .simple_type = .inferred_alloc_mut }, // empty_struct_type .{ .anon_struct_type = .{ @@ -1568,6 +1571,12 @@ pub const SimpleType = enum(u32) { type_info, generic_poison, + /// TODO: remove this from `SimpleType`; instead make it only a special `Index` tag like + /// `var_args_param_type`. + inferred_alloc_const, + /// TODO: remove this from `SimpleType`; instead make it only a special `Index` tag like + /// `var_args_param_type`. 
+ inferred_alloc_mut, }; pub const SimpleValue = enum(u32) { diff --git a/src/Module.zig b/src/Module.zig index b718193abaa3..982c568d244f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6818,7 +6818,7 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { } pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { - const info = ptr_ty.ptrInfoIp(mod.intern_pool); + const info = Type.ptrInfoIp(mod.intern_pool, ptr_ty.toIntern()); return mod.ptrType(.{ .elem_type = new_child.toIntern(), diff --git a/src/Sema.zig b/src/Sema.zig index 4f0bfd712072..14383d107e54 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -904,10 +904,10 @@ fn analyzeBodyInner( const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_mut)), + .alloc_inferred => try sema.zirAllocInferred(block, inst, .{ .ip_index = .inferred_alloc_const_type }), + .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, .{ .ip_index = .inferred_alloc_mut_type }), + .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, .{ .ip_index = .inferred_alloc_const_type }), + .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, .{ .ip_index = .inferred_alloc_mut_type }), .alloc_mut => try sema.zirAllocMut(block, inst), .alloc_comptime_mut => try sema.zirAllocComptime(block, inst), .make_ptr_const => try sema.zirMakePtrConst(block, inst), @@ -3471,9 +3471,9 @@ fn zirAllocExtended( } else 0; const inferred_alloc_ty = if (small.is_const) - Type.initTag(.inferred_alloc_const) + Type{ .ip_index = .inferred_alloc_const_type } else - Type.initTag(.inferred_alloc_mut); + Type{ .ip_index = .inferred_alloc_mut_type }; if (block.is_comptime or small.is_comptime) { if (small.has_type) { @@ -3707,9 +3707,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload; const ptr_val = sema.air_values.items[value_index]; - const var_is_mut = switch (sema.typeOf(ptr).tag()) { - .inferred_alloc_const => false, - .inferred_alloc_mut => true, + const var_is_mut = switch (sema.typeOf(ptr).toIntern()) { + .inferred_alloc_const_type => false, + .inferred_alloc_mut_type => true, + else => unreachable, }; const target = sema.mod.getTarget(); @@ -7451,7 +7452,7 @@ fn instantiateGenericCall( }; arg_val.hashUncoerced(arg_ty, &hasher, mod); if (is_anytype) { - arg_ty.hashWithHasher(&hasher, mod); + std.hash.autoHash(&hasher, arg_ty.toIntern()); generic_args[i] = .{ .ty = arg_ty, .val = arg_val, @@ -7465,7 +7466,7 @@ fn instantiateGenericCall( }; } } else if (is_anytype) { - arg_ty.hashWithHasher(&hasher, mod); + std.hash.autoHash(&hasher, arg_ty.toIntern()); generic_args[i] = .{ .ty = arg_ty, .val = Value.generic_poison, @@ -8233,7 +8234,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const duped_name = try sema.arena.dupe(u8, 
inst_data.get(sema.code)); return sema.addConstant( - .{ .ip_index = .enum_literal_type, .legacy = undefined }, + .{ .ip_index = .enum_literal_type }, try Value.Tag.enum_literal.create(sema.arena, duped_name), ); } @@ -13278,9 +13279,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const rhs_val = maybe_rhs_val orelse unreachable; const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable; if (!rem.compareAllWithZero(.eq, mod)) { - return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{ - @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod), - }); + return sema.fail( + block, + src, + "ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'", + .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(resolved_type, mod) }, + ); } } @@ -13386,7 +13390,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const air_tag = if (is_int) blk: { if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) { - return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) }); + return sema.fail( + block, + src, + "division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact", + .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) }, + ); } break :blk Air.Inst.Tag.div_trunc; } else switch (block.float_mode) { @@ -23367,7 +23376,7 @@ fn validateRunTimeType( }; } -const TypeSet = std.HashMapUnmanaged(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage); +const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void); fn explainWhyTypeIsComptime( sema: *Sema, @@ -23453,7 +23462,7 @@ fn explainWhyTypeIsComptimeInner( }, .Struct => { - if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return; + if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; if (mod.typeToStruct(ty)) |struct_obj| { for (struct_obj.fields.values(), 0..) |field, i| { @@ -23472,7 +23481,7 @@ fn explainWhyTypeIsComptimeInner( }, .Union => { - if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return; + if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; if (mod.typeToUnion(ty)) |union_obj| { for (union_obj.fields.values(), 0..) |field, i| { @@ -27459,8 +27468,8 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { // different behavior depending on whether the types were inferred. // Something seems wrong here. 
if (prev_ptr_ty.ip_index == .none) { - if (prev_ptr_ty.tag() == .inferred_alloc_mut) return null; - if (prev_ptr_ty.tag() == .inferred_alloc_const) return null; + if (prev_ptr_ty.ip_index == .inferred_alloc_mut_type) return null; + if (prev_ptr_ty.ip_index == .inferred_alloc_const_type) return null; } const prev_ptr_child_ty = prev_ptr_ty.childType(mod); @@ -31677,6 +31686,9 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, + + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; @@ -31931,6 +31943,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .bool_false => unreachable, .empty_struct => unreachable, .generic_poison => unreachable, + .inferred_alloc_const_type => unreachable, + .inferred_alloc_mut_type => unreachable, .type_info_type => return sema.getBuiltinType("Type"), .extern_options_type => return sema.getBuiltinType("ExternOptions"), @@ -33032,16 +33046,9 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const mod = sema.mod; - - switch (ty.ip_index) { - .empty_struct_type => return Value.empty_struct, - - .none => switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - }, - - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (ty.ip_index) { + .empty_struct_type => Value.empty_struct, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { return try mod.intValue(ty, 0); @@ -33123,6 +33130,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .undefined => Value.undef, .generic_poison => return error.GenericPoison, + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const resolved_ty = try sema.resolveTypeFields(ty); @@ -33245,7 +33254,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .enum_tag => unreachable, .aggregate => unreachable, }, - } + }; } /// Returns the type of the AIR instruction. @@ -33563,16 +33572,15 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError /// This logic must be kept in sync with `Type.isPtrLikeOptional`. 
fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { const mod = sema.mod; - - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice => return null, - .C => return ptr_type.elem_type.toType(), - .One, .Many => return ty, + .Slice => null, + .C => ptr_type.elem_type.toType(), + .One, .Many => ty, }, .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) { .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice, .C => return null, + .Slice, .C => null, .Many, .One => { if (ptr_type.is_allowzero) return null; @@ -33585,15 +33593,10 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { return payload_ty; }, }, - else => return null, + else => null, }, - else => return null, + else => null, }; - - switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - } } /// `generic_poison` will return false. @@ -33677,6 +33680,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, + + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; diff --git a/src/Zir.zig b/src/Zir.zig index 7af779c1aaca..45a6fae90be4 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2112,6 +2112,8 @@ pub const Inst = struct { const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), + inferred_alloc_mut_type = @enumToInt(InternPool.Index.inferred_alloc_mut_type), empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), undef = @enumToInt(InternPool.Index.undef), zero = @enumToInt(InternPool.Index.zero), diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a98b77c96432..d3b8e06e5d0d 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -5367,116 +5367,111 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { // Ensure complete type definition is visible before accessing fields. 
_ = try f.typeToIndex(struct_ty, .complete); - const field_name: CValue = switch (struct_ty.ip_index) { - .none => switch (struct_ty.tag()) { - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { - .struct_type => switch (struct_ty.containerLayout(mod)) { - .Auto, .Extern => if (struct_ty.isSimpleTuple(mod)) - .{ .field = extra.field_index } - else - .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, - .Packed => { - const struct_obj = mod.typeToStruct(struct_ty).?; - const int_info = struct_ty.intInfo(mod); - - const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); + const field_name: CValue = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .struct_type => switch (struct_ty.containerLayout(mod)) { + .Auto, .Extern => if (struct_ty.isSimpleTuple(mod)) + .{ .field = extra.field_index } + else + .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .Packed => { + const struct_obj = mod.typeToStruct(struct_ty).?; + const int_info = struct_ty.intInfo(mod); - const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); - const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - const field_int_signedness = if (inst_ty.isAbiInt(mod)) - inst_ty.intInfo(mod).signedness - else - .unsigned; - const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); + const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); - const temp_local = try f.allocLocal(inst, field_int_ty); - try f.writeCValue(writer, temp_local, .Other); - try writer.writeAll(" = zig_wrap_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); - try writer.writeAll("(("); - try f.renderType(writer, field_int_ty); + const field_int_signedness = if (inst_ty.isAbiInt(mod)) + inst_ty.intInfo(mod).signedness + else + .unsigned; + const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); + + const temp_local = try f.allocLocal(inst, field_int_ty); + try f.writeCValue(writer, temp_local, .Other); + try writer.writeAll(" = zig_wrap_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); + try writer.writeAll("(("); + try f.renderType(writer, field_int_ty); + try writer.writeByte(')'); + const cant_cast = int_info.bits > 64; + if (cant_cast) { + if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + try writer.writeAll("zig_lo_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + if (bit_offset > 0) { + try writer.writeAll("zig_shr_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + try f.writeCValue(writer, struct_byval, .Other); + if (bit_offset > 0) { + try writer.writeAll(", "); + try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); - const cant_cast = int_info.bits > 64; - if (cant_cast) { - if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); - try writer.writeAll("zig_lo_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); - } - if (bit_offset > 0) { - try 
writer.writeAll("zig_shr_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); - } - try f.writeCValue(writer, struct_byval, .Other); - if (bit_offset > 0) { - try writer.writeAll(", "); - try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - try writer.writeByte(')'); - } - if (cant_cast) try writer.writeByte(')'); - try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); - try writer.writeAll(");\n"); - if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local; + } + if (cant_cast) try writer.writeByte(')'); + try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); + try writer.writeAll(");\n"); + if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local; - const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy("); - try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); - try writer.writeAll(", "); - try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("));\n"); - try freeLocal(f, inst, temp_local.new_local, 0); - return local; - }, + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("memcpy("); + try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); + try writer.writeAll(", "); + try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); + try writer.writeAll(", sizeof("); + try f.renderType(writer, inst_ty); + try writer.writeAll("));\n"); + try freeLocal(f, inst, temp_local.new_local, 0); + return local; }, + }, - .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0) - .{ .field = extra.field_index } - else - .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0) + .{ .field = extra.field_index } + else + .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + + .union_type => |union_type| field_name: { + const union_obj = mod.unionPtr(union_type.index); + if (union_obj.layout == .Packed) { + const operand_lval = if (struct_byval == .constant) blk: { + const operand_local = try f.allocLocal(inst, struct_ty); + try f.writeCValue(writer, operand_local, .Other); + try writer.writeAll(" = "); + try f.writeCValue(writer, struct_byval, .Initializer); + try writer.writeAll(";\n"); + break :blk operand_local; + } else struct_byval; - .union_type => |union_type| field_name: { - const union_obj = mod.unionPtr(union_type.index); - if (union_obj.layout == .Packed) { - const operand_lval = if (struct_byval == .constant) blk: { - const operand_local = try f.allocLocal(inst, struct_ty); - try f.writeCValue(writer, operand_local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, struct_byval, .Initializer); - try writer.writeAll(";\n"); - break :blk operand_local; - } else struct_byval; - - const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy(&"); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(", &"); - try f.writeCValue(writer, operand_lval, .Other); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("));\n"); - - if (struct_byval == .constant) { - try freeLocal(f, inst, operand_lval.new_local, 0); - } + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("memcpy(&"); + try 
f.writeCValue(writer, local, .Other); + try writer.writeAll(", &"); + try f.writeCValue(writer, operand_lval, .Other); + try writer.writeAll(", sizeof("); + try f.renderType(writer, inst_ty); + try writer.writeAll("));\n"); - return local; - } else { - const name = union_obj.fields.keys()[extra.field_index]; - break :field_name if (union_type.hasTag()) .{ - .payload_identifier = name, - } else .{ - .identifier = name, - }; + if (struct_byval == .constant) { + try freeLocal(f, inst, operand_lval.new_local, 0); } - }, - else => unreachable, + + return local; + } else { + const name = union_obj.fields.keys()[extra.field_index]; + break :field_name if (union_type.hasTag()) .{ + .payload_identifier = name, + } else .{ + .identifier = name, + }; + } }, + else => unreachable, }; const local = try f.allocLocal(inst, inst_ty); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 2f4f6589b5f1..12d6a5752d9a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -381,12 +381,7 @@ pub const Object = struct { /// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we /// want to iterate over it while adding entries to it. - pub const DITypeMap = std.ArrayHashMapUnmanaged( - Type, - AnnotatedDITypePtr, - Type.HashContext32, - true, - ); + pub const DITypeMap = std.AutoArrayHashMapUnmanaged(InternPool.Index, AnnotatedDITypePtr); pub fn create(gpa: Allocator, options: link.Options) !*Object { const obj = try gpa.create(Object); @@ -1437,7 +1432,7 @@ pub const Object = struct { const gpa = o.gpa; // Be careful not to reference this `gop` variable after any recursive calls // to `lowerDebugType`. - const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .mod = o.module }); + const gop = try o.di_type_map.getOrPut(gpa, ty.toIntern()); if (gop.found_existing) { const annotated = gop.value_ptr.*; const di_type = annotated.toDIType(); @@ -1450,7 +1445,7 @@ pub const Object = struct { }; return o.lowerDebugTypeImpl(entry, resolve, di_type); } - errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module })); + errdefer assert(o.di_type_map.orderedRemove(ty.toIntern())); const entry: Object.DITypeMap.Entry = .{ .key_ptr = gop.key_ptr, .value_ptr = gop.value_ptr, @@ -1465,7 +1460,7 @@ pub const Object = struct { resolve: DebugResolveStatus, opt_fwd_decl: ?*llvm.DIType, ) Allocator.Error!*llvm.DIType { - const ty = gop.key_ptr.*; + const ty = gop.key_ptr.toType(); const gpa = o.gpa; const target = o.target; const dib = o.di_builder.?; @@ -1498,7 +1493,7 @@ pub const Object = struct { const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty)); return enum_di_ty; } @@ -1558,7 +1553,7 @@ pub const Object = struct { "", ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(enum_di_ty)); return enum_di_ty; }, .Float => { @@ -1577,7 +1572,7 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. 
- const ptr_info = ty.ptrInfoIp(mod.intern_pool);
+ const ptr_info = Type.ptrInfoIp(mod.intern_pool, ty.toIntern());
 if (ptr_info.sentinel != .none or
 ptr_info.address_space != .generic or
@@ -1603,7 +1598,7 @@
 });
 const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
 return ptr_di_ty;
 }
@@ -1682,7 +1677,7 @@
 );
 dib.replaceTemporary(fwd_decl, full_di_ty);
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
 return full_di_ty;
 }
@@ -1696,7 +1691,7 @@
 name,
 );
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(ptr_di_ty));
 return ptr_di_ty;
 },
 .Opaque => {
@@ -1718,7 +1713,7 @@
 );
 // The recursive call to `lowerDebugType` via `namespaceToDebugScope`
 // means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(opaque_di_ty));
 return opaque_di_ty;
 },
 .Array => {
@@ -1729,7 +1724,7 @@
 @intCast(c_int, ty.arrayLen(mod)),
 );
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(array_di_ty));
 return array_di_ty;
 },
 .Vector => {
@@ -1761,7 +1756,7 @@
 ty.vectorLen(mod),
 );
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(vector_di_ty));
 return vector_di_ty;
 },
 .Optional => {
@@ -1777,7 +1772,7 @@
 if (ty.optionalReprIsPayload(mod)) {
 const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.init(ptr_di_ty, resolve));
 return ptr_di_ty;
 }
@@ -1850,7 +1845,7 @@
 );
 dib.replaceTemporary(fwd_decl, full_di_ty);
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty)); return full_di_ty; }, .ErrorUnion => { @@ -1858,7 +1853,7 @@ pub const Object = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(err_set_di_ty)); return err_set_di_ty; } const name = try ty.nameAlloc(gpa, o.module); @@ -1941,7 +1936,7 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty)); return full_di_ty; }, .ErrorSet => { @@ -2038,7 +2033,7 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty)); return full_di_ty; }, .struct_type => |struct_type| s: { @@ -2057,7 +2052,7 @@ pub const Object = struct { dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty)); return struct_di_ty; } }, @@ -2070,7 +2065,7 @@ pub const Object = struct { dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(struct_di_ty)); return struct_di_ty; } @@ -2126,7 +2121,7 @@ pub const Object = struct { ); dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty)); return full_di_ty; }, .Union => { @@ -2155,7 +2150,7 @@ pub const Object = struct { dib.replaceTemporary(fwd_decl, union_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module }); + try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty)); return union_di_ty; } @@ -2182,7 +2177,7 @@ pub const Object = struct { dib.replaceTemporary(fwd_decl, full_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` // means we can't use `gop` anymore. 
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
 return full_di_ty;
 }
@@ -2241,7 +2236,7 @@
 if (layout.tag_size == 0) {
 dib.replaceTemporary(fwd_decl, union_di_ty);
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(union_di_ty));
 return union_di_ty;
 }
@@ -2302,7 +2297,7 @@
 );
 dib.replaceTemporary(fwd_decl, full_di_ty);
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(full_di_ty));
 return full_di_ty;
 },
 .Fn => {
@@ -2349,7 +2344,7 @@
 0,
 );
 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
- try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .mod = o.module });
+ try o.di_type_map.put(gpa, ty.toIntern(), AnnotatedDITypePtr.initFull(fn_di_ty));
 return fn_di_ty;
 },
 .ComptimeInt => unreachable,
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 4d8e86562235..ed2883f4daea 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -87,12 +87,7 @@ pub const DeclState = struct {
 dbg_info: std.ArrayList(u8),
 abbrev_type_arena: std.heap.ArenaAllocator,
 abbrev_table: std.ArrayListUnmanaged(AbbrevEntry) = .{},
- abbrev_resolver: std.HashMapUnmanaged(
- Type,
- u32,
- Type.HashContext64,
- std.hash_map.default_max_load_percentage,
- ) = .{},
+ abbrev_resolver: std.AutoHashMapUnmanaged(InternPool.Index, u32) = .{},
 abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
 exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
@@ -142,9 +137,7 @@ pub const DeclState = struct {
 /// @symbol signifies a type abbreviation positioned somewhere in the .debug_abbrev section
 /// which we use as our target of the relocation.
fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void { - const resolv = self.abbrev_resolver.getContext(ty, .{ - .mod = self.mod, - }) orelse blk: { + const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: { const sym_index = @intCast(u32, self.abbrev_table.items.len); try self.abbrev_table.append(self.gpa, .{ .atom_index = atom_index, @@ -152,12 +145,8 @@ pub const DeclState = struct { .offset = undefined, }); log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.mod) }); - try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{ - .mod = self.mod, - }); - break :blk self.abbrev_resolver.getContext(ty, .{ - .mod = self.mod, - }).?; + try self.abbrev_resolver.putNoClobber(self.gpa, ty.toIntern(), sym_index); + break :blk sym_index; }; log.debug("{x}: %{d} + 0", .{ offset, resolv }); try self.abbrev_relocs.append(self.gpa, .{ diff --git a/src/print_air.zig b/src/print_air.zig index 0e4f2d16cf2f..ef52b4c085eb 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -366,13 +366,7 @@ const Writer = struct { } fn writeType(w: *Writer, s: anytype, ty: Type) !void { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"), - .inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"), - }, - else => try ty.print(s, w.module), - } + return ty.print(s, w.module); } fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { diff --git a/src/type.zig b/src/type.zig index d1ee185be5e8..64a5643eb6f3 100644 --- a/src/type.zig +++ b/src/type.zig @@ -11,115 +11,99 @@ const TypedValue = @import("TypedValue.zig"); const Sema = @import("Sema.zig"); const InternPool = @import("InternPool.zig"); -const file_struct = @This(); - +/// Both types and values are canonically represented by a single 32-bit integer +/// which is an index into an `InternPool` data structure. +/// This struct abstracts around this storage by providing methods only +/// applicable to types rather than values in general. pub const Type = struct { - /// We are migrating towards using this for every Type object. However, many - /// types are still represented the legacy way. This is indicated by using - /// InternPool.Index.none. ip_index: InternPool.Index, - /// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication. - /// This union takes advantage of the fact that the first page of memory - /// is unmapped, giving us 4096 possible enum tags that have no payload. - legacy: extern union { - /// If the tag value is less than Tag.no_payload_count, then no pointer - /// dereference is needed. 
- tag_if_small_enough: Tag, - ptr_otherwise: *Payload, - }, - pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { return ty.zigTypeTagOrPoison(mod) catch unreachable; } pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .inferred_alloc_const, - .inferred_alloc_mut, - => return .Pointer, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => .Int, - .ptr_type => .Pointer, - .array_type => .Array, - .vector_type => .Vector, - .opt_type => .Optional, - .error_union_type => .ErrorUnion, - .error_set_type, .inferred_error_set_type => .ErrorSet, - .struct_type, .anon_struct_type => .Struct, - .union_type => .Union, - .opaque_type => .Opaque, - .enum_type => .Enum, - .func_type => .Fn, - .anyframe_type => .AnyFrame, - .simple_type => |s| switch (s) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - => .Float, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => .Int, + .ptr_type => .Pointer, + .array_type => .Array, + .vector_type => .Vector, + .opt_type => .Optional, + .error_union_type => .ErrorUnion, + .error_set_type, .inferred_error_set_type => .ErrorSet, + .struct_type, .anon_struct_type => .Struct, + .union_type => .Union, + .opaque_type => .Opaque, + .enum_type => .Enum, + .func_type => .Fn, + .anyframe_type => .AnyFrame, + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + .c_longdouble, + => .Float, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - => .Int, - - .anyopaque => .Opaque, - .bool => .Bool, - .void => .Void, - .type => .Type, - .anyerror => .ErrorSet, - .comptime_int => .ComptimeInt, - .comptime_float => .ComptimeFloat, - .noreturn => .NoReturn, - .null => .Null, - .undefined => .Undefined, - .enum_literal => .EnumLiteral, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + => .Int, + + .anyopaque => .Opaque, + .bool => .Bool, + .void => .Void, + .type => .Type, + .anyerror => .ErrorSet, + .comptime_int => .ComptimeInt, + .comptime_float => .ComptimeFloat, + .noreturn => .NoReturn, + .null => .Null, + .undefined => .Undefined, + .enum_literal => .EnumLiteral, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - => .Enum, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + => .Enum, - .prefetch_options, - .export_options, - .extern_options, - => .Struct, + .prefetch_options, + .export_options, + .extern_options, + => .Struct, - .type_info => .Union, + .type_info => .Union, - .generic_poison => return error.GenericPoison, - }, + .generic_poison => return error.GenericPoison, - // values, not types - .undef => unreachable, - .un => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .simple_value => unreachable, - .aggregate => unreachable, + .inferred_alloc_const, .inferred_alloc_mut => return .Pointer, }, - } + + // values, not types + .undef => unreachable, + .un => unreachable, + .extern_func => unreachable, + .int => unreachable, + .float => unreachable, + .ptr => unreachable, + .opt => unreachable, + .enum_tag => unreachable, + .simple_value => 
unreachable, + .aggregate => unreachable, + }; } pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { @@ -171,68 +155,6 @@ pub const Type = struct { }; } - pub fn initTag(comptime small_tag: Tag) Type { - comptime assert(@enumToInt(small_tag) < Tag.no_payload_count); - return Type{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = small_tag }, - }; - } - - pub fn initPayload(payload: *Payload) Type { - assert(@enumToInt(payload.tag) >= Tag.no_payload_count); - return Type{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = payload }, - }; - } - - pub fn tag(ty: Type) Tag { - assert(ty.ip_index == .none); - if (@enumToInt(ty.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return ty.legacy.tag_if_small_enough; - } else { - return ty.legacy.ptr_otherwise.tag; - } - } - - /// Prefer `castTag` to this. - pub fn cast(self: Type, comptime T: type) ?*T { - if (self.ip_index != .none) { - return null; - } - if (@hasField(T, "base_tag")) { - return self.castTag(T.base_tag); - } - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return null; - } - inline for (@typeInfo(Tag).Enum.fields) |field| { - if (field.value < Tag.no_payload_count) - continue; - const t = @intToEnum(Tag, field.value); - if (self.legacy.ptr_otherwise.tag == t) { - if (T == t.Type()) { - return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); - } - return null; - } - } - unreachable; - } - - pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - if (self.ip_index != .none) return null; - - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) - return null; - - if (self.legacy.ptr_otherwise.tag == t) - return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); - - return null; - } - /// If it is a function pointer, returns the function type. Otherwise returns null. pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { if (ty.zigTypeTag(mod) != .Pointer) return null; @@ -260,8 +182,8 @@ pub const Type = struct { }; } - pub fn ptrInfoIp(ty: Type, ip: InternPool) InternPool.Key.PtrType { - return switch (ip.indexToKey(ty.ip_index)) { + pub fn ptrInfoIp(ip: InternPool, ty: InternPool.Index) InternPool.Key.PtrType { + return switch (ip.indexToKey(ty)) { .ptr_type => |p| p, .opt_type => |child| switch (ip.indexToKey(child)) { .ptr_type => |p| p, @@ -272,135 +194,28 @@ pub const Type = struct { } pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { - return Payload.Pointer.Data.fromKey(ptrInfoIp(ty, mod.intern_pool)); - } - - pub fn eql(a: Type, b: Type, mod: *Module) bool { - if (a.ip_index != .none or b.ip_index != .none) { - // The InternPool data structure hashes based on Key to make interned objects - // unique. An Index can be treated simply as u32 value for the - // purpose of Type/Value hashing and equality. - return a.ip_index == b.ip_index; - } - // As a shortcut, if the small tags / addresses match, we're done. 
- if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; - - switch (a.tag()) { - .inferred_alloc_const, - .inferred_alloc_mut, - => { - if (b.zigTypeTag(mod) != .Pointer) return false; - - const info_a = a.ptrInfo(mod); - const info_b = b.ptrInfo(mod); - if (!info_a.pointee_type.eql(info_b.pointee_type, mod)) - return false; - if (info_a.@"align" != info_b.@"align") - return false; - if (info_a.@"addrspace" != info_b.@"addrspace") - return false; - if (info_a.bit_offset != info_b.bit_offset) - return false; - if (info_a.host_size != info_b.host_size) - return false; - if (info_a.vector_index != info_b.vector_index) - return false; - if (info_a.@"allowzero" != info_b.@"allowzero") - return false; - if (info_a.mutable != info_b.mutable) - return false; - if (info_a.@"volatile" != info_b.@"volatile") - return false; - if (info_a.size != info_b.size) - return false; - - const sentinel_a = info_a.sentinel; - const sentinel_b = info_b.sentinel; - if (sentinel_a) |sa| { - if (sentinel_b) |sb| { - if (!sa.eql(sb, info_a.pointee_type, mod)) - return false; - } else { - return false; - } - } else { - if (sentinel_b != null) - return false; - } - - return true; - }, - } + return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.ip_index)); } - pub fn hash(self: Type, mod: *Module) u64 { - var hasher = std.hash.Wyhash.init(0); - self.hashWithHasher(&hasher, mod); - return hasher.final(); + pub fn eql(a: Type, b: Type, mod: *const Module) bool { + _ = mod; // TODO: remove this parameter + assert(a.ip_index != .none); + assert(b.ip_index != .none); + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. + return a.ip_index == b.ip_index; } - pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - if (ty.ip_index != .none) { - // The InternPool data structure hashes based on Key to make interned objects - // unique. An Index can be treated simply as u32 value for the - // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, ty.ip_index); - return; - } - switch (ty.tag()) { - .inferred_alloc_const, - .inferred_alloc_mut, - => { - std.hash.autoHash(hasher, std.builtin.TypeId.Pointer); - - const info = ty.ptrInfo(mod); - hashWithHasher(info.pointee_type, hasher, mod); - hashSentinel(info.sentinel, info.pointee_type, hasher, mod); - std.hash.autoHash(hasher, info.@"align"); - std.hash.autoHash(hasher, info.@"addrspace"); - std.hash.autoHash(hasher, info.bit_offset); - std.hash.autoHash(hasher, info.host_size); - std.hash.autoHash(hasher, info.vector_index); - std.hash.autoHash(hasher, info.@"allowzero"); - std.hash.autoHash(hasher, info.mutable); - std.hash.autoHash(hasher, info.@"volatile"); - std.hash.autoHash(hasher, info.size); - }, - } - } - - fn hashSentinel(opt_val: ?Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - if (opt_val) |s| { - std.hash.autoHash(hasher, true); - s.hash(ty, hasher, mod); - } else { - std.hash.autoHash(hasher, false); - } + pub fn hash(ty: Type, mod: *const Module) u32 { + _ = mod; // TODO: remove this parameter + assert(ty.ip_index != .none); + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. 
+ return std.hash.uint32(@enumToInt(ty.ip_index)); } - pub const HashContext64 = struct { - mod: *Module, - - pub fn hash(self: @This(), t: Type) u64 { - return t.hash(self.mod); - } - pub fn eql(self: @This(), a: Type, b: Type) bool { - return a.eql(b, self.mod); - } - }; - - pub const HashContext32 = struct { - mod: *Module, - - pub fn hash(self: @This(), t: Type) u32 { - return @truncate(u32, t.hash(self.mod)); - } - pub fn eql(self: @This(), a: Type, b: Type, b_index: usize) bool { - _ = b_index; - return a.eql(b, self.mod); - } - }; - pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { _ = ty; _ = unused_fmt_string; @@ -460,214 +275,208 @@ pub const Type = struct { /// Prints a name suitable for `@typeName`. pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| { - const sign_char: u8 = switch (int_type.signedness) { - .signed => 'i', - .unsigned => 'u', - }; - return writer.print("{c}{d}", .{ sign_char, int_type.bits }); - }, - .ptr_type => { - const info = ty.ptrInfo(mod); - - if (info.sentinel) |s| switch (info.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), - .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), - } else switch (info.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { - if (info.@"align" != 0) { - try writer.print("align({d}", .{info.@"align"}); - } else { - const alignment = info.pointee_type.abiAlignment(mod); - try writer.print("align({d}", .{alignment}); - } + .ptr_type => { + const info = ty.ptrInfo(mod); - if (info.bit_offset != 0 or info.host_size != 0) { - try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); - } - if (info.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (info.vector_index != .none) { - try writer.print(":{d}", .{@enumToInt(info.vector_index)}); - } - try writer.writeAll(") "); - } - if (info.@"addrspace" != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); + if (info.sentinel) |s| switch (info.size) { + .One, .C => unreachable, + .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), + .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), + } else switch (info.size) { + .One => try writer.writeAll("*"), + .Many => try writer.writeAll("[*]"), + .C => try writer.writeAll("[*c]"), + .Slice => try writer.writeAll("[]"), + } + if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { + if (info.@"align" != 0) { + try writer.print("align({d}", .{info.@"align"}); + } else { + const alignment = info.pointee_type.abiAlignment(mod); + try writer.print("align({d}", .{alignment}); } - if (!info.mutable) try writer.writeAll("const "); - if (info.@"volatile") try writer.writeAll("volatile "); - 
if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); - try print(info.pointee_type, writer, mod); - return; - }, - .array_type => |array_type| { - if (array_type.sentinel == .none) { - try writer.print("[{d}]", .{array_type.len}); - try print(array_type.child.toType(), writer, mod); - } else { - try writer.print("[{d}:{}]", .{ - array_type.len, - array_type.sentinel.toValue().fmtValue(array_type.child.toType(), mod), - }); - try print(array_type.child.toType(), writer, mod); + if (info.bit_offset != 0 or info.host_size != 0) { + try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); } - return; - }, - .vector_type => |vector_type| { - try writer.print("@Vector({d}, ", .{vector_type.len}); - try print(vector_type.child.toType(), writer, mod); - try writer.writeAll(")"); - return; - }, - .opt_type => |child| { - try writer.writeByte('?'); - return print(child.toType(), writer, mod); - }, - .error_union_type => |error_union_type| { - try print(error_union_type.error_set_type.toType(), writer, mod); - try writer.writeByte('!'); - try print(error_union_type.payload_type.toType(), writer, mod); - return; - }, - .inferred_error_set_type => |index| { - const ies = mod.inferredErrorSetPtr(index); - const func = ies.func; + if (info.vector_index == .runtime) { + try writer.writeAll(":?"); + } else if (info.vector_index != .none) { + try writer.print(":{d}", .{@enumToInt(info.vector_index)}); + } + try writer.writeAll(") "); + } + if (info.@"addrspace" != .generic) { + try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); + } + if (!info.mutable) try writer.writeAll("const "); + if (info.@"volatile") try writer.writeAll("volatile "); + if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(func.owner_decl); - try owner_decl.renderFullyQualifiedName(mod, writer); - try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); - }, - .error_set_type => |error_set_type| { - const names = error_set_type.names; - try writer.writeAll("error{"); - for (names, 0..) 
|name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(mod.intern_pool.stringToSlice(name)); + try print(info.pointee_type, writer, mod); + return; + }, + .array_type => |array_type| { + if (array_type.sentinel == .none) { + try writer.print("[{d}]", .{array_type.len}); + try print(array_type.child.toType(), writer, mod); + } else { + try writer.print("[{d}:{}]", .{ + array_type.len, + array_type.sentinel.toValue().fmtValue(array_type.child.toType(), mod), + }); + try print(array_type.child.toType(), writer, mod); + } + return; + }, + .vector_type => |vector_type| { + try writer.print("@Vector({d}, ", .{vector_type.len}); + try print(vector_type.child.toType(), writer, mod); + try writer.writeAll(")"); + return; + }, + .opt_type => |child| { + try writer.writeByte('?'); + return print(child.toType(), writer, mod); + }, + .error_union_type => |error_union_type| { + try print(error_union_type.error_set_type.toType(), writer, mod); + try writer.writeByte('!'); + try print(error_union_type.payload_type.toType(), writer, mod); + return; + }, + .inferred_error_set_type => |index| { + const ies = mod.inferredErrorSetPtr(index); + const func = ies.func; + + try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); + const owner_decl = mod.declPtr(func.owner_decl); + try owner_decl.renderFullyQualifiedName(mod, writer); + try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + }, + .error_set_type => |error_set_type| { + const names = error_set_type.names; + try writer.writeAll("error{"); + for (names, 0..) |name, i| { + if (i != 0) try writer.writeByte(','); + try writer.writeAll(mod.intern_pool.stringToSlice(name)); + } + try writer.writeAll("}"); + }, + .simple_type => |s| return writer.writeAll(@tagName(s)), + .struct_type => |struct_type| { + if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { + const decl = mod.declPtr(struct_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + } else if (struct_type.namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try namespace.renderFullyQualifiedName(mod, "", writer); + } else { + try writer.writeAll("@TypeOf(.{})"); + } + }, + .anon_struct_type => |anon_struct| { + try writer.writeAll("struct{"); + for (anon_struct.types, anon_struct.values, 0..) |field_ty, val, i| { + if (i != 0) try writer.writeAll(", "); + if (val != .none) { + try writer.writeAll("comptime "); } - try writer.writeAll("}"); - }, - .simple_type => |s| return writer.writeAll(@tagName(s)), - .struct_type => |struct_type| { - if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { - const decl = mod.declPtr(struct_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - } else if (struct_type.namespace.unwrap()) |namespace_index| { - const namespace = mod.namespacePtr(namespace_index); - try namespace.renderFullyQualifiedName(mod, "", writer); - } else { - try writer.writeAll("@TypeOf(.{})"); + if (anon_struct.names.len != 0) { + const name = mod.intern_pool.stringToSlice(anon_struct.names[i]); + try writer.writeAll(name); + try writer.writeAll(": "); } - }, - .anon_struct_type => |anon_struct| { - try writer.writeAll("struct{"); - for (anon_struct.types, anon_struct.values, 0..) 
|field_ty, val, i| { - if (i != 0) try writer.writeAll(", "); - if (val != .none) { - try writer.writeAll("comptime "); - } - if (anon_struct.names.len != 0) { - const name = mod.intern_pool.stringToSlice(anon_struct.names[i]); - try writer.writeAll(name); - try writer.writeAll(": "); - } - try print(field_ty.toType(), writer, mod); + try print(field_ty.toType(), writer, mod); - if (val != .none) { - try writer.print(" = {}", .{val.toValue().fmtValue(field_ty.toType(), mod)}); - } + if (val != .none) { + try writer.print(" = {}", .{val.toValue().fmtValue(field_ty.toType(), mod)}); } - try writer.writeAll("}"); - }, + } + try writer.writeAll("}"); + }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - const decl = mod.declPtr(union_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .opaque_type => |opaque_type| { - const decl = mod.declPtr(opaque_type.decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_type => |enum_type| { - const decl = mod.declPtr(enum_type.decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .func_type => |fn_info| { - if (fn_info.is_noinline) { - try writer.writeAll("noinline "); - } - try writer.writeAll("fn("); - for (fn_info.param_types, 0..) |param_ty, i| { - if (i != 0) try writer.writeAll(", "); - if (std.math.cast(u5, i)) |index| { - if (fn_info.paramIsComptime(index)) { - try writer.writeAll("comptime "); - } - if (fn_info.paramIsNoalias(index)) { - try writer.writeAll("noalias "); - } - } - if (param_ty == .generic_poison_type) { - try writer.writeAll("anytype"); - } else { - try print(param_ty.toType(), writer, mod); + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + const decl = mod.declPtr(union_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .opaque_type => |opaque_type| { + const decl = mod.declPtr(opaque_type.decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .enum_type => |enum_type| { + const decl = mod.declPtr(enum_type.decl); + try decl.renderFullyQualifiedName(mod, writer); + }, + .func_type => |fn_info| { + if (fn_info.is_noinline) { + try writer.writeAll("noinline "); + } + try writer.writeAll("fn("); + for (fn_info.param_types, 0..) 
|param_ty, i| { + if (i != 0) try writer.writeAll(", "); + if (std.math.cast(u5, i)) |index| { + if (fn_info.paramIsComptime(index)) { + try writer.writeAll("comptime "); } - } - if (fn_info.is_var_args) { - if (fn_info.param_types.len != 0) { - try writer.writeAll(", "); + if (fn_info.paramIsNoalias(index)) { + try writer.writeAll("noalias "); } - try writer.writeAll("..."); - } - try writer.writeAll(") "); - if (fn_info.alignment.toByteUnitsOptional()) |a| { - try writer.print("align({d}) ", .{a}); - } - if (fn_info.cc != .Unspecified) { - try writer.writeAll("callconv(."); - try writer.writeAll(@tagName(fn_info.cc)); - try writer.writeAll(") "); } - if (fn_info.return_type == .generic_poison_type) { + if (param_ty == .generic_poison_type) { try writer.writeAll("anytype"); } else { - try print(fn_info.return_type.toType(), writer, mod); + try print(param_ty.toType(), writer, mod); } - }, - .anyframe_type => |child| { - if (child == .none) return writer.writeAll("anyframe"); - try writer.writeAll("anyframe->"); - return print(child.toType(), writer, mod); - }, - - // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + } + if (fn_info.is_var_args) { + if (fn_info.param_types.len != 0) { + try writer.writeAll(", "); + } + try writer.writeAll("..."); + } + try writer.writeAll(") "); + if (fn_info.alignment.toByteUnitsOptional()) |a| { + try writer.print("align({d}) ", .{a}); + } + if (fn_info.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(fn_info.cc)); + try writer.writeAll(") "); + } + if (fn_info.return_type == .generic_poison_type) { + try writer.writeAll("anytype"); + } else { + try print(fn_info.return_type.toType(), writer, mod); + } + }, + .anyframe_type => |child| { + if (child == .none) return writer.writeAll("anyframe"); + try writer.writeAll("anyframe->"); + return print(child.toType(), writer, mod); }, + + // values, not types + .undef => unreachable, + .un => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .float => unreachable, + .ptr => unreachable, + .opt => unreachable, + .enum_tag => unreachable, + .aggregate => unreachable, } } @@ -699,15 +508,10 @@ pub const Type = struct { ignore_comptime_only: bool, strat: AbiAlignmentAdvancedStrat, ) RuntimeBitsError!bool { - switch (ty.ip_index) { + return switch (ty.ip_index) { // False because it is a comptime-only type. 
- .empty_struct_type => return false, - - .none => switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .empty_struct_type => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| int_type.bits != 0, .ptr_type => |ptr_type| { // Pointers to zero-bit types still have a runtime address; however, pointers @@ -802,6 +606,8 @@ pub const Type = struct { => false, .generic_poison => unreachable, + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { @@ -880,7 +686,7 @@ pub const Type = struct { .enum_tag => unreachable, .aggregate => unreachable, }, - } + }; } /// true if and only if the type has a well-defined memory layout @@ -950,6 +756,9 @@ pub const Type = struct { .type_info, .generic_poison, => false, + + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { @@ -1167,10 +976,7 @@ pub const Type = struct { .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, else => { - const u80_ty: Type = .{ - .ip_index = .u80_type, - .legacy = undefined, - }; + const u80_ty: Type = .{ .ip_index = .u80_type }; return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; }, }, @@ -1194,6 +1000,8 @@ pub const Type = struct { .noreturn => unreachable, .generic_poison => unreachable, + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse @@ -1562,10 +1370,7 @@ pub const Type = struct { .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, else => { - const u80_ty: Type = .{ - .ip_index = .u80_type, - .legacy = undefined, - }; + const u80_ty: Type = .{ .ip_index = .u80_type }; return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; }, }, @@ -1605,6 +1410,8 @@ pub const Type = struct { .type_info => unreachable, .noreturn => unreachable, .generic_poison => unreachable, + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| switch (ty.containerLayout(mod)) { .Packed => { @@ -1835,6 +1642,8 @@ pub const Type = struct { .undefined => unreachable, .enum_literal => unreachable, .generic_poison => unreachable, + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, .atomic_order => unreachable, // missing call to resolveTypeFields .atomic_rmw_op => unreachable, // missing call to resolveTypeFields @@ -1927,17 +1736,13 @@ pub const Type = struct { } pub fn isSinglePointer(ty: Type, mod: *const Module) bool { - switch (ty.ip_index) { - .none => return switch (ty.tag()) { - .inferred_alloc_const, - .inferred_alloc_mut, - => true, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (ty.ip_index) { + .inferred_alloc_const_type, .inferred_alloc_mut_type => true, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_info| ptr_info.size == .One, else => false, }, - } + }; } /// Asserts `ty` is a pointer. 
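
The same two-level dispatch repeats in the `ptrSizeOrNull` hunk that follows: the two inferred-alloc sentinels, now ordinary `InternPool.Index` values, are matched first, and every real type falls through to its interned key. A sketch of a caller relying on that (illustrative, not from the patch; assumes a `ty: Type` and `mod: *const Module`):

    // Sentinel indices and interned pointer keys answer the same query,
    // so callers no longer need a legacy-tag branch.
    if (ty.ptrSizeOrNull(mod)) |size| {
        if (size == .One) std.debug.assert(ty.isSinglePointer(mod));
    }
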
@@ -1948,11 +1753,7 @@ pub const Type = struct { /// Returns `null` if `ty` is not a pointer. pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .inferred_alloc_const, - .inferred_alloc_mut, - => .One, - }, + .inferred_alloc_const_type, .inferred_alloc_mut_type => .One, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_info| ptr_info.size, else => null, @@ -2625,10 +2426,6 @@ pub const Type = struct { while (true) switch (ty.ip_index) { .empty_struct_type => return Value.empty_struct, - .none => switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { @@ -2710,6 +2507,8 @@ pub const Type = struct { .undefined => return Value.undef, .generic_poison => unreachable, + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { if (mod.structPtrUnwrap(struct_type.index)) |s| { @@ -2888,6 +2687,9 @@ pub const Type = struct { .enum_literal, .type_info, => true, + + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { // A struct with no fields is not comptime-only. @@ -3343,61 +3145,56 @@ pub const Type = struct { /// Supports structs and unions. pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - assert(struct_obj.haveLayout()); - assert(struct_obj.layout != .Packed); - var it = ty.iterateStructOffsets(mod); - while (it.next()) |field_offset| { - if (index == field_offset.field) - return field_offset.offset; - } - - return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); - }, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveLayout()); + assert(struct_obj.layout != .Packed); + var it = ty.iterateStructOffsets(mod); + while (it.next()) |field_offset| { + if (index == field_offset.field) + return field_offset.offset; + } - .anon_struct_type => |tuple| { - var offset: u64 = 0; - var big_align: u32 = 0; + return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); + }, - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { - if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) { - // comptime field - if (i == index) return offset; - continue; - } + .anon_struct_type => |tuple| { + var offset: u64 = 0; + var big_align: u32 = 0; - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + for (tuple.types, tuple.values, 0..) 
|field_ty, field_val, i| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) { + // comptime field if (i == index) return offset; - offset += field_ty.toType().abiSize(mod); + continue; } - offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); - return offset; - }, - .union_type => |union_type| { - if (!union_type.hasTag()) - return 0; - const union_obj = mod.unionPtr(union_type.index); - const layout = union_obj.getLayout(mod, true); - if (layout.tag_align >= layout.payload_align) { - // {Tag, Payload} - return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); - } else { - // {Payload, Tag} - return 0; - } - }, + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + if (i == index) return offset; + offset += field_ty.toType().abiSize(mod); + } + offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); + return offset; + }, - else => unreachable, + .union_type => |union_type| { + if (!union_type.hasTag()) + return 0; + const union_obj = mod.unionPtr(union_type.index); + const layout = union_obj.getLayout(mod, true); + if (layout.tag_align >= layout.payload_align) { + // {Tag, Payload} + return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + } else { + // {Payload, Tag} + return 0; + } }, + + else => unreachable, } } @@ -3445,25 +3242,6 @@ pub const Type = struct { return ty.ip_index == .generic_poison_type; } - /// This enum does not directly correspond to `std.builtin.TypeId` because - /// it has extra enum tags in it, as a way of using less memory. For example, - /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types - /// but with different alignment values, in this data structure they are represented - /// with different enum tags, because the the former requires more payload data than the latter. - /// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`. - pub const Tag = enum(usize) { - /// This is a special value that tracks a set of types that have been stored - /// to an inferred allocation. It does not support most of the normal type queries. - /// However it does respond to `isConstPtr`, `ptrSize`, `zigTypeTag`, etc. - inferred_alloc_mut, - /// Same as `inferred_alloc_mut` but the local is `var` not `const`. - inferred_alloc_const, // See last_no_payload_tag below. - // After this, the tag requires a payload. - - pub const last_no_payload_tag = Tag.inferred_alloc_const; - pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; - }; - pub fn isTuple(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { .none => false, @@ -3511,14 +3289,9 @@ pub const Type = struct { }; } - /// The sub-types are named after what fields they contain. pub const Payload = struct { - tag: Tag, - /// TODO: remove this data structure since we have `InternPool.Key.PtrType`. 
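
The union arm of `structFieldOffset` above encodes the layout rule in its two comments; spelled out as a sketch (same names as in the patch, assuming a `layout` with `tag_size`, `tag_align`, and `payload_align` already computed by `getLayout`):

    // {Tag, Payload}: the payload starts past the tag, aligned up;
    // {Payload, Tag}: otherwise the payload sits at offset 0.
    const payload_offset: u64 = if (layout.tag_align >= layout.payload_align)
        std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align)
    else
        0;
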
pub const Pointer = struct { - data: Data, - pub const Data = struct { pointee_type: Type, sentinel: ?Value = null, @@ -3568,64 +3341,60 @@ pub const Type = struct { }; }; - pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; - pub const @"u8": Type = .{ .ip_index = .u8_type, .legacy = undefined }; - pub const @"u16": Type = .{ .ip_index = .u16_type, .legacy = undefined }; - pub const @"u29": Type = .{ .ip_index = .u29_type, .legacy = undefined }; - pub const @"u32": Type = .{ .ip_index = .u32_type, .legacy = undefined }; - pub const @"u64": Type = .{ .ip_index = .u64_type, .legacy = undefined }; - pub const @"u128": Type = .{ .ip_index = .u128_type, .legacy = undefined }; - - pub const @"i8": Type = .{ .ip_index = .i8_type, .legacy = undefined }; - pub const @"i16": Type = .{ .ip_index = .i16_type, .legacy = undefined }; - pub const @"i32": Type = .{ .ip_index = .i32_type, .legacy = undefined }; - pub const @"i64": Type = .{ .ip_index = .i64_type, .legacy = undefined }; - pub const @"i128": Type = .{ .ip_index = .i128_type, .legacy = undefined }; - - pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined }; - pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined }; - pub const @"f64": Type = .{ .ip_index = .f64_type, .legacy = undefined }; - pub const @"f80": Type = .{ .ip_index = .f80_type, .legacy = undefined }; - pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined }; - - pub const @"bool": Type = .{ .ip_index = .bool_type, .legacy = undefined }; - pub const @"usize": Type = .{ .ip_index = .usize_type, .legacy = undefined }; - pub const @"isize": Type = .{ .ip_index = .isize_type, .legacy = undefined }; - pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type, .legacy = undefined }; - pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined }; - pub const @"void": Type = .{ .ip_index = .void_type, .legacy = undefined }; - pub const @"type": Type = .{ .ip_index = .type_type, .legacy = undefined }; - pub const @"anyerror": Type = .{ .ip_index = .anyerror_type, .legacy = undefined }; - pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type, .legacy = undefined }; - pub const @"anyframe": Type = .{ .ip_index = .anyframe_type, .legacy = undefined }; - pub const @"null": Type = .{ .ip_index = .null_type, .legacy = undefined }; - pub const @"undefined": Type = .{ .ip_index = .undefined_type, .legacy = undefined }; - pub const @"noreturn": Type = .{ .ip_index = .noreturn_type, .legacy = undefined }; - - pub const @"c_char": Type = .{ .ip_index = .c_char_type, .legacy = undefined }; - pub const @"c_short": Type = .{ .ip_index = .c_short_type, .legacy = undefined }; - pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type, .legacy = undefined }; - pub const @"c_int": Type = .{ .ip_index = .c_int_type, .legacy = undefined }; - pub const @"c_uint": Type = .{ .ip_index = .c_uint_type, .legacy = undefined }; - pub const @"c_long": Type = .{ .ip_index = .c_long_type, .legacy = undefined }; - pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type, .legacy = undefined }; - pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type, .legacy = undefined }; - pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; - pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; - - pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type, .legacy = undefined }; - pub const manyptr_u8: Type = 
.{ .ip_index = .manyptr_u8_type, .legacy = undefined }; + pub const @"u1": Type = .{ .ip_index = .u1_type }; + pub const @"u8": Type = .{ .ip_index = .u8_type }; + pub const @"u16": Type = .{ .ip_index = .u16_type }; + pub const @"u29": Type = .{ .ip_index = .u29_type }; + pub const @"u32": Type = .{ .ip_index = .u32_type }; + pub const @"u64": Type = .{ .ip_index = .u64_type }; + pub const @"u128": Type = .{ .ip_index = .u128_type }; + + pub const @"i8": Type = .{ .ip_index = .i8_type }; + pub const @"i16": Type = .{ .ip_index = .i16_type }; + pub const @"i32": Type = .{ .ip_index = .i32_type }; + pub const @"i64": Type = .{ .ip_index = .i64_type }; + pub const @"i128": Type = .{ .ip_index = .i128_type }; + + pub const @"f16": Type = .{ .ip_index = .f16_type }; + pub const @"f32": Type = .{ .ip_index = .f32_type }; + pub const @"f64": Type = .{ .ip_index = .f64_type }; + pub const @"f80": Type = .{ .ip_index = .f80_type }; + pub const @"f128": Type = .{ .ip_index = .f128_type }; + + pub const @"bool": Type = .{ .ip_index = .bool_type }; + pub const @"usize": Type = .{ .ip_index = .usize_type }; + pub const @"isize": Type = .{ .ip_index = .isize_type }; + pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type }; + pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type }; + pub const @"void": Type = .{ .ip_index = .void_type }; + pub const @"type": Type = .{ .ip_index = .type_type }; + pub const @"anyerror": Type = .{ .ip_index = .anyerror_type }; + pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type }; + pub const @"anyframe": Type = .{ .ip_index = .anyframe_type }; + pub const @"null": Type = .{ .ip_index = .null_type }; + pub const @"undefined": Type = .{ .ip_index = .undefined_type }; + pub const @"noreturn": Type = .{ .ip_index = .noreturn_type }; + + pub const @"c_char": Type = .{ .ip_index = .c_char_type }; + pub const @"c_short": Type = .{ .ip_index = .c_short_type }; + pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type }; + pub const @"c_int": Type = .{ .ip_index = .c_int_type }; + pub const @"c_uint": Type = .{ .ip_index = .c_uint_type }; + pub const @"c_long": Type = .{ .ip_index = .c_long_type }; + pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type }; + pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type }; + pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; + pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; + + pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type }; + pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type, - .legacy = undefined, - }; - pub const const_slice_u8_sentinel_0: Type = .{ - .ip_index = .const_slice_u8_sentinel_0_type, - .legacy = undefined, }; - pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type, .legacy = undefined }; + pub const const_slice_u8_sentinel_0: Type = .{ .ip_index = .const_slice_u8_sentinel_0_type }; + pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; - pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined }; + pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; pub const err_int = Type.u16; @@ -3709,33 +3478,4 @@ pub const Type = struct { /// This is only used for comptime asserts. 
Bump this number when you make a change /// to packed struct layout to find out all the places in the codebase you need to edit! pub const packed_struct_layout_version = 2; - - /// This function is used in the debugger pretty formatters in tools/ to fetch the - /// Tag to Payload mapping to facilitate fancy debug printing for this type. - fn dbHelper(self: *Type, tag_to_payload_map: *map: { - const tags = @typeInfo(Tag).Enum.fields; - var fields: [tags.len]std.builtin.Type.StructField = undefined; - for (&fields, tags) |*field, t| field.* = .{ - .name = t.name, - .type = *if (t.value < Tag.no_payload_count) void else @field(Tag, t.name).Type(), - .default_value = null, - .is_comptime = false, - .alignment = 0, - }; - break :map @Type(.{ .Struct = .{ - .layout = .Extern, - .fields = &fields, - .decls = &.{}, - .is_tuple = false, - } }); - }) void { - _ = self; - _ = tag_to_payload_map; - } - - comptime { - if (builtin.mode == .Debug) { - _ = &dbHelper; - } - } }; diff --git a/src/value.zig b/src/value.zig index ea45f0f90659..3e5e98db17e0 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2159,9 +2159,7 @@ pub const Value = struct { .Null, => {}, - .Type => { - return val.toType().hashWithHasher(hasher, mod); - }, + .Type => unreachable, // handled via ip_index check above .Float => { // For hash/eql purposes, we treat floats as their IEEE integer representation. switch (ty.floatBits(mod.getTarget())) { @@ -2310,9 +2308,7 @@ pub const Value = struct { .Null, .Struct, // It sure would be nice to do something clever with structs. => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag), - .Type => { - val.toType().hashWithHasher(hasher, mod); - }, + .Type => unreachable, // handled above with the ip_index check .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) { .slice => { From e4d498cd3add8d3ccbcb4d5899769cca49a4837c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 20 May 2023 17:59:54 -0700 Subject: [PATCH 081/205] InternPool: add missing ensureCapacity call with enums --- src/InternPool.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index b9b706e499ca..5a7500cafd30 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3019,6 +3019,7 @@ pub fn getIncompleteEnum( gpa: Allocator, enum_type: Key.IncompleteEnumType, ) Allocator.Error!InternPool.IncompleteEnumType { + try ip.items.ensureUnusedCapacity(gpa, 1); switch (enum_type.tag_mode) { .auto => return getIncompleteEnumAuto(ip, gpa, enum_type), .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit), @@ -3073,7 +3074,7 @@ pub fn getIncompleteEnumAuto( }; } -pub fn getIncompleteEnumExplicit( +fn getIncompleteEnumExplicit( ip: *InternPool, gpa: Allocator, enum_type: Key.IncompleteEnumType, From 25cd4bb3c9220e308cae9956dc4f579c66bf175a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 20 May 2023 15:42:21 -0400 Subject: [PATCH 082/205] Type: hack around `isNoReturn` queries for the remaining legacy tags --- src/type.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/type.zig b/src/type.zig index 64a5643eb6f3..f2fad91eba2b 100644 --- a/src/type.zig +++ b/src/type.zig @@ -826,7 +826,7 @@ pub const Type = struct { } pub fn isNoReturn(ty: Type, mod: *Module) bool { - return mod.intern_pool.isNoReturn(ty.ip_index); + return if (ty.ip_index != .none) mod.intern_pool.isNoReturn(ty.ip_index) else false; } /// 
Returns 0 if the pointer is naturally aligned and the element type is 0-bit. From cbf304d8c3f7f1e1746a98dcad979ecf79ed16b5 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 20 May 2023 18:24:42 -0400 Subject: [PATCH 083/205] InternPool: fix coersion issues --- src/InternPool.zig | 16 +++++++++------- src/Sema.zig | 4 ++-- src/codegen/llvm.zig | 14 ++++++++++---- src/value.zig | 17 +++++++++++++---- 4 files changed, 34 insertions(+), 17 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 5a7500cafd30..837dc96daf16 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2584,9 +2584,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .extern_func => @panic("TODO"), - .ptr => |ptr| switch (ip.items.items(.tag)[@enumToInt(ptr.ty)]) { - .type_pointer => { - assert(ptr.len == .none); + .ptr => |ptr| switch (ptr.len) { + .none => { + assert(ip.indexToKey(ptr.ty).ptr_type.size != .Slice); switch (ptr.addr) { .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{ .tag = .ptr_var, @@ -2626,11 +2626,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), } }, - .type_slice => { - assert(ptr.len != .none); + else => { + assert(ip.indexToKey(ptr.ty).ptr_type.size == .Slice); var new_key = key; - new_key.ptr.ty = @intToEnum(Index, ip.items.items(.data)[@enumToInt(ptr.ty)]); + new_key.ptr.ty = ip.slicePtrType(ptr.ty); new_key.ptr.len = .none; + assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many); const ptr_index = try get(ip, gpa, new_key); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ @@ -2641,7 +2642,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), }); }, - else => unreachable, }, .opt => |opt| { @@ -3465,10 +3465,12 @@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// Given an existing value, returns the same value but with the supplied type. /// Only some combinations are allowed: +/// * identity coercion /// * int <=> int /// * int <=> enum /// * ptr <=> ptr pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { + if (ip.typeOf(val) == new_ty) return val; switch (ip.indexToKey(val)) { .int => |int| switch (ip.indexToKey(new_ty)) { .enum_type => return ip.get(gpa, .{ .enum_tag = .{ diff --git a/src/Sema.zig b/src/Sema.zig index 14383d107e54..57a88ad78240 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7836,7 +7836,7 @@ fn resolveGenericInstantiationType( const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?; child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, - .val = try arg_val.copy(new_decl_arena_allocator), + .val = (try arg_val.intern(arg_ty, mod)).toValue(), }; } else { child_sema.comptime_args[arg_i] = .{ @@ -16537,7 +16537,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + try Value.Tag.bytes.create(anon_decl.arena(), bytes.ptr[0 .. 
bytes.len + 1]), 0, // default alignment ); break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 12d6a5752d9a..0f7a61f1bfbb 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3357,7 +3357,7 @@ pub const DeclGen = struct { }), }, else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| return lowerIntAsPtr(dg, int), + .int => |int| return dg.lowerIntAsPtr(int), .ptr => |ptr| { const ptr_val = switch (ptr.addr) { .@"var" => |@"var"| ptr: { @@ -3376,7 +3376,7 @@ pub const DeclGen = struct { }, .decl => |decl| try lowerDeclRefValue(dg, tv, decl), .mut_decl => |mut_decl| try lowerDeclRefValue(dg, tv, mut_decl.decl), - .int => |int| lowerIntAsPtr(dg, mod.intern_pool.indexToKey(int).int), + .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), }; switch (ptr.len) { .none => return ptr_val, @@ -4084,8 +4084,14 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); - if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { - .int => |int| return lowerIntAsPtr(dg, int), + if (ptr_val.ip_index != .none) return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .int => |int| dg.lowerIntAsPtr(int), + .ptr => |ptr| switch (ptr.addr) { + .@"var" => |@"var"| dg.lowerParentPtrDecl(ptr_val, @"var".owner_decl), + .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), + .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), + .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), + }, else => unreachable, }; switch (ptr_val.tag()) { diff --git a/src/value.zig b/src/value.zig index 3e5e98db17e0..d17e14d14649 100644 --- a/src/value.zig +++ b/src/value.zig @@ -603,7 +603,7 @@ pub const Value = struct { } pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { - if (val.ip_index != .none) return val.ip_index; + if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index); switch (val.tag()) { .slice => { const pl = val.castTag(.slice).?.data; @@ -2769,9 +2769,18 @@ pub const Value = struct { mod: *Module, ) Allocator.Error!Value { const elem_ty = ty.elemType2(mod); - const ptr_val = switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr, - else => val, + const ptr_val = switch (val.ip_index) { + .none => switch (val.tag()) { + .slice => val.castTag(.slice).?.data.ptr, + else => val, + }, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.len) { + .none => val, + else => val.slicePtr(mod), + }, + else => val, + }, }; if (ptr_val.ip_index == .none and ptr_val.tag() == .elem_ptr) { From dfd91abfe15e653cba7b61fef73340ea07c6e3e9 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 20 May 2023 23:24:39 -0400 Subject: [PATCH 084/205] InternPool: add more pointer values --- src/Air.zig | 2 +- src/InternPool.zig | 276 ++++++++++++++++++++++++++++++++----------- src/Module.zig | 4 +- src/Sema.zig | 194 ++++++++++++++++++++++++++++-- src/codegen/llvm.zig | 136 ++++++++++++++++++++- src/print_zir.zig | 2 +- src/value.zig | 82 ++++++++----- 7 files changed, 583 insertions(+), 113 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 3314f2897e2e..070cf7dc724a 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1292,7 +1292,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { 
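
This patch also swaps `indexToKey(...).typeOf()` for a direct `InternPool.typeOf`, as the `Air.typeOfIndex` hunk below shows first. The difference, sketched (assuming `ip: InternPool` and an interned value `val: InternPool.Index`):

    // before: materialize the full Key, then ask it for a type
    //   const ty = ip.indexToKey(val).typeOf().toType();
    // after: read the type straight off the interned item
    const ty = ip.typeOf(val).toType();
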
.try_ptr, => return air.getRefType(datas[inst].ty_pl.ty), - .interned => return ip.indexToKey(datas[inst].interned).typeOf().toType(), + .interned => return ip.typeOf(datas[inst].interned).toType(), .not, .bitcast, diff --git a/src/InternPool.zig b/src/InternPool.zig index 837dc96daf16..1c59aae39e5c 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -510,6 +510,16 @@ pub const Key = union(enum) { runtime_index: RuntimeIndex, }, int: Index, + eu_payload: Index, + opt_payload: Index, + comptime_field: Index, + elem: BaseIndex, + field: BaseIndex, + + pub const BaseIndex = struct { + base: Index, + index: u64, + }; }; }; @@ -599,6 +609,7 @@ pub const Key = union(enum) { .ptr => |ptr| { std.hash.autoHash(hasher, ptr.ty); + std.hash.autoHash(hasher, ptr.len); // Int-to-ptr pointers are hashed separately than decl-referencing pointers. // This is sound due to pointer provenance rules. std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr)); @@ -607,6 +618,11 @@ pub const Key = union(enum) { .decl => |decl| std.hash.autoHash(hasher, decl), .mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl), .int => |int| std.hash.autoHash(hasher, int), + .eu_payload => |eu_payload| std.hash.autoHash(hasher, eu_payload), + .opt_payload => |opt_payload| std.hash.autoHash(hasher, opt_payload), + .comptime_field => |comptime_field| std.hash.autoHash(hasher, comptime_field), + .elem => |elem| std.hash.autoHash(hasher, elem), + .field => |field| std.hash.autoHash(hasher, field), } }, @@ -719,7 +735,7 @@ pub const Key = union(enum) { .ptr => |a_info| { const b_info = b.ptr; - if (a_info.ty != b_info.ty) return false; + if (a_info.ty != b_info.ty or a_info.len != b_info.len) return false; const AddrTag = @typeInfo(Key.Ptr.Addr).Union.tag_type.?; if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; @@ -729,6 +745,11 @@ pub const Key = union(enum) { .decl => |a_decl| a_decl == b_info.addr.decl, .mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl), .int => |a_int| a_int == b_info.addr.int, + .eu_payload => |a_eu_payload| a_eu_payload == b_info.addr.eu_payload, + .opt_payload => |a_opt_payload| a_opt_payload == b_info.addr.opt_payload, + .comptime_field => |a_comptime_field| a_comptime_field == b_info.addr.comptime_field, + .elem => |a_elem| std.meta.eql(a_elem, b_info.addr.elem), + .field => |a_field| std.meta.eql(a_field, b_info.addr.field), }; }, @@ -1375,6 +1396,26 @@ pub const Tag = enum(u8) { /// Only pointer types are allowed to have this encoding. Optional types must use /// `opt_payload` or `opt_null`. ptr_int, + /// A pointer to the payload of an error union. + /// data is Index of a pointer value to the error union. + /// In order to use this encoding, one must ensure that the `InternPool` + /// already contains the payload pointer type corresponding to this payload. + ptr_eu_payload, + /// A pointer to the payload of an optional. + /// data is Index of a pointer value to the optional. + /// In order to use this encoding, one must ensure that the `InternPool` + /// already contains the payload pointer type corresponding to this payload. + ptr_opt_payload, + /// data is extra index of PtrComptimeField, which contains the pointer type and field value. + ptr_comptime_field, + /// A pointer to an array element. + /// data is extra index of PtrBaseIndex, which contains the base array and element index. 
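
The five new address flavors above (`eu_payload`, `opt_payload`, `comptime_field`, `elem`, `field`) let comptime pointer derivations live in the pool. Constructing one, sketched (assuming `ip: *InternPool`, an allocator `gpa`, an already-interned base pointer `base`, and an element-pointer type `elem_ptr_ty` that the pool already contains, as the encoding docs below require):

    // Interns the comptime-known address of element 3 of `base`'s pointee;
    // `indexToKey` later rebuilds this key, reading the index back out of
    // an interned `usize` value.
    const elem_ptr = try ip.get(gpa, .{ .ptr = .{
        .ty = elem_ptr_ty,
        .addr = .{ .elem = .{ .base = base, .index = 3 } },
        .len = .none,
    } });
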
+ /// In order to use this encoding, one must ensure that the `InternPool` + /// already contains the elem pointer type corresponding to this payload. + ptr_elem, + /// A pointer to a container field. + /// data is extra index of PtrBaseIndex, which contains the base container and field index. + ptr_field, /// A slice. /// data is extra index of PtrSlice, which contains the ptr and len values /// In order to use this encoding, one must ensure that the `InternPool` @@ -1753,6 +1794,17 @@ pub const PtrInt = struct { addr: Index, }; +pub const PtrComptimeField = struct { + ty: Index, + field_val: Index, +}; + +pub const PtrBaseIndex = struct { + ty: Index, + base: Index, + index: Index, +}; + pub const PtrSlice = struct { ptr: Index, len: Index, @@ -1956,10 +2008,10 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }, .type_slice => { - const ptr_ty_index = @intToEnum(Index, data); - var result = indexToKey(ip, ptr_ty_index); - result.ptr_type.size = .Slice; - return result; + const ptr_type_index = @intToEnum(Index, data); + var result = indexToKey(ip, ptr_type_index).ptr_type; + result.size = .Slice; + return .{ .ptr_type = result }; }, .type_optional => .{ .opt_type = @intToEnum(Index, data) }, @@ -2063,7 +2115,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { // The existence of `opt_payload` guarantees that the optional type will be // stored in the `InternPool`. const opt_ty = ip.getAssumeExists(.{ - .opt_type = indexToKey(ip, payload_val).typeOf(), + .opt_type = ip.typeOf(payload_val), }); return .{ .opt = .{ .ty = opt_ty, @@ -2108,14 +2160,59 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .addr = .{ .int = info.addr }, } }; }, + .ptr_eu_payload => { + const ptr_eu_index = @intToEnum(Index, data); + var ptr_type = ip.indexToKey(ip.typeOf(ptr_eu_index)).ptr_type; + ptr_type.elem_type = ip.indexToKey(ptr_type.elem_type).error_union_type.payload_type; + return .{ .ptr = .{ + .ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }), + .addr = .{ .eu_payload = ptr_eu_index }, + } }; + }, + .ptr_opt_payload => { + const ptr_opt_index = @intToEnum(Index, data); + var ptr_type = ip.indexToKey(ip.typeOf(ptr_opt_index)).ptr_type; + ptr_type.elem_type = ip.indexToKey(ptr_type.elem_type).opt_type; + return .{ .ptr = .{ + .ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }), + .addr = .{ .opt_payload = ptr_opt_index }, + } }; + }, + .ptr_comptime_field => { + const info = ip.extraData(PtrComptimeField, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .comptime_field = info.field_val }, + } }; + }, + .ptr_elem => { + const info = ip.extraData(PtrBaseIndex, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .elem = .{ + .base = info.base, + .index = ip.indexToKey(info.index).int.storage.u64, + } }, + } }; + }, + .ptr_field => { + const info = ip.extraData(PtrBaseIndex, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .field = .{ + .base = info.base, + .index = ip.indexToKey(info.index).int.storage.u64, + } }, + } }; + }, .ptr_slice => { const info = ip.extraData(PtrSlice, data); const ptr = ip.indexToKey(info.ptr).ptr; - var ptr_ty = ip.indexToKey(ptr.ty); - assert(ptr_ty.ptr_type.size == .Many); - ptr_ty.ptr_type.size = .Slice; + var ptr_type = ip.indexToKey(ptr.ty).ptr_type; + assert(ptr_type.size == .Many); + ptr_type.size = .Slice; return .{ .ptr = .{ - .ty = ip.getAssumeExists(ptr_ty), + .ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }), .addr = ptr.addr, .len = info.len, } }; @@ -2301,9 +2398,7 @@ fn indexToKeyBigInt(ip: InternPool, limb_index: 
u32, positive: bool) Key { pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) { - return @intToEnum(Index, gop.index); - } + if (gop.found_existing) return @intToEnum(Index, gop.index); try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { @@ -2322,11 +2417,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (ptr_type.size == .Slice) { var new_key = key; new_key.ptr_type.size = .Many; - const ptr_ty_index = try get(ip, gpa, new_key); + const ptr_type_index = try get(ip, gpa, new_key); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ .tag = .type_slice, - .data = @enumToInt(ptr_ty_index), + .data = @enumToInt(ptr_type_index), }); return @intToEnum(Index, ip.items.len - 1); } @@ -2584,64 +2679,98 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .extern_func => @panic("TODO"), - .ptr => |ptr| switch (ptr.len) { - .none => { - assert(ip.indexToKey(ptr.ty).ptr_type.size != .Slice); - switch (ptr.addr) { - .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_var, - .data = try ip.addExtra(gpa, PtrVar{ - .ty = ptr.ty, - .init = @"var".init, - .owner_decl = @"var".owner_decl, - .lib_name = @"var".lib_name, - .flags = .{ - .is_const = @"var".is_const, - .is_threadlocal = @"var".is_threadlocal, - .is_weak_linkage = @"var".is_weak_linkage, - }, + .ptr => |ptr| { + const ptr_type = ip.indexToKey(ptr.ty).ptr_type; + switch (ptr.len) { + .none => { + assert(ptr_type.size != .Slice); + switch (ptr.addr) { + .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_var, + .data = try ip.addExtra(gpa, PtrVar{ + .ty = ptr.ty, + .init = @"var".init, + .owner_decl = @"var".owner_decl, + .lib_name = @"var".lib_name, + .flags = .{ + .is_const = @"var".is_const, + .is_threadlocal = @"var".is_threadlocal, + .is_weak_linkage = @"var".is_weak_linkage, + }, + }), }), - }), - .decl => |decl| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_decl, - .data = try ip.addExtra(gpa, PtrDecl{ - .ty = ptr.ty, - .decl = decl, + .decl => |decl| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_decl, + .data = try ip.addExtra(gpa, PtrDecl{ + .ty = ptr.ty, + .decl = decl, + }), }), - }), - .mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_mut_decl, - .data = try ip.addExtra(gpa, PtrMutDecl{ - .ty = ptr.ty, - .decl = mut_decl.decl, - .runtime_index = mut_decl.runtime_index, + .mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_mut_decl, + .data = try ip.addExtra(gpa, PtrMutDecl{ + .ty = ptr.ty, + .decl = mut_decl.decl, + .runtime_index = mut_decl.runtime_index, + }), }), - }), - .int => |int| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_int, - .data = try ip.addExtra(gpa, PtrInt{ - .ty = ptr.ty, - .addr = int, + .int => |int| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_int, + .data = try ip.addExtra(gpa, PtrInt{ + .ty = ptr.ty, + .addr = int, + }), }), - }), - } - }, - else => { - assert(ip.indexToKey(ptr.ty).ptr_type.size == .Slice); - var new_key = key; - new_key.ptr.ty = ip.slicePtrType(ptr.ty); - new_key.ptr.len = .none; - assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many); - const ptr_index = try get(ip, gpa, new_key); - try ip.items.ensureUnusedCapacity(gpa, 1); - ip.items.appendAssumeCapacity(.{ - .tag = .ptr_slice, - .data = try ip.addExtra(gpa, PtrSlice{ - .ptr = 
ptr_index, - .len = ptr.len, - }), - }); - }, + .eu_payload, .opt_payload => |data| ip.items.appendAssumeCapacity(.{ + .tag = switch (ptr.addr) { + .eu_payload => .ptr_eu_payload, + .opt_payload => .ptr_opt_payload, + else => unreachable, + }, + .data = @enumToInt(data), + }), + .comptime_field => |field_val| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_comptime_field, + .data = try ip.addExtra(gpa, PtrComptimeField{ + .ty = ptr.ty, + .field_val = field_val, + }), + }), + .elem, .field => |base_index| { + const index_index = try get(ip, gpa, .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = base_index.index }, + } }); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_elem, + .data = try ip.addExtra(gpa, PtrBaseIndex{ + .ty = ptr.ty, + .base = base_index.base, + .index = index_index, + }), + }); + }, + } + }, + else => { + assert(ptr_type.size == .Slice); + var new_key = key; + new_key.ptr.ty = ip.slicePtrType(ptr.ty); + new_key.ptr.len = .none; + assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many); + const ptr_index = try get(ip, gpa, new_key); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_slice, + .data = try ip.addExtra(gpa, PtrSlice{ + .ptr = ptr_index, + .len = ptr.len, + }), + }); + }, + } + assert(ptr.ty == ip.indexToKey(@intToEnum(Index, ip.items.len - 1)).ptr.ty); }, .opt => |opt| { @@ -3683,6 +3812,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .ptr_decl => @sizeOf(PtrDecl), .ptr_mut_decl => @sizeOf(PtrMutDecl), .ptr_int => @sizeOf(PtrInt), + .ptr_eu_payload => 0, + .ptr_opt_payload => 0, + .ptr_comptime_field => @sizeOf(PtrComptimeField), + .ptr_elem => @sizeOf(PtrBaseIndex), + .ptr_field => @sizeOf(PtrBaseIndex), .ptr_slice => @sizeOf(PtrSlice), .opt_null => 0, .opt_payload => 0, @@ -3757,6 +3891,10 @@ pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } +pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Union { + return ip.allocated_unions.at(@enumToInt(index)); +} + pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet { return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } diff --git a/src/Module.zig b/src/Module.zig index 982c568d244f..832368c0d049 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6783,7 +6783,7 @@ pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allo pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type { if (std.debug.runtime_safety and info.sentinel != .none) { - const sent_ty = mod.intern_pool.indexToKey(info.sentinel).typeOf(); + const sent_ty = mod.intern_pool.typeOf(info.sentinel); assert(sent_ty == info.child); } const i = try intern(mod, .{ .array_type = info }); @@ -6802,7 +6802,7 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error! 
pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { if (std.debug.runtime_safety and info.sentinel != .none) { - const sent_ty = mod.intern_pool.indexToKey(info.sentinel).typeOf(); + const sent_ty = mod.intern_pool.typeOf(info.sentinel); assert(sent_ty == info.elem_type); } const i = try intern(mod, .{ .ptr_type = info }); diff --git a/src/Sema.zig b/src/Sema.zig index 57a88ad78240..e109da79ded8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -28473,6 +28473,178 @@ fn beginComptimePtrLoad( .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, + .eu_payload, .opt_payload => |container_ptr| blk: { + const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); + const payload_ty = ptr.ty.toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); + + // eu_payload_ptr and opt_payload_ptr never have a well-defined layout + if (deref.parent != null) { + deref.parent = null; + deref.ty_without_well_defined_layout = container_ty; + } + + if (deref.pointee) |*tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + const payload_val = switch (ptr_val.tag()) { + .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { + return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); + }, + .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { + if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); + break :opt tv.val; + }, + else => unreachable, + }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; + break :blk deref; + } + } + deref.pointee = null; + break :blk deref; + }, + .comptime_field => |comptime_field| blk: { + const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); + break :blk ComptimePtrLoadKit{ + .parent = null, + .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, + .is_mutable = false, + .ty_without_well_defined_layout = field_ty, + }; + }, + .elem => |elem_ptr| blk: { + const elem_ty = ptr.ty.toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); + + // This code assumes that elem_ptrs have been "flattened" in order for direct dereference + // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that + // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" + switch (mod.intern_pool.indexToKey(elem_ptr.base)) { + .ptr => |base_ptr| switch (base_ptr.addr) { + .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), + else => {}, + }, + else => {}, + } + + if (elem_ptr.index != 0) { + if (elem_ty.hasWellDefinedLayout(mod)) { + if (deref.parent) |*parent| { + // Update the byte offset (in-place) + const elem_size = try sema.typeAbiSize(elem_ty); + const offset = parent.byte_offset + elem_size * elem_ptr.index; + parent.byte_offset = try sema.usizeCast(block, src, offset); + } + } else { + deref.parent = null; + deref.ty_without_well_defined_layout = elem_ty; + } + } + + // If we're loading an elem that was derived from a different type + // than the true type of the underlying decl, we cannot deref directly + const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { + const deref_elem_ty = deref.pointee.?.ty.childType(mod); + break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; + } else false; + if (!ty_matches) { + deref.pointee = null; + break :blk deref; + } + + var array_tv = deref.pointee.?; + const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); + if (maybe_array_ty) |load_ty| { + // It's possible that we're loading a [N]T, in which case we'd like to slice + // the pointee array directly from our parent array. + if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { + const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); + deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ + .ty = try Type.array(sema.arena, N, null, elem_ty, mod), + .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N), + } else null; + break :blk deref; + } + } + + if (elem_ptr.index >= check_len) { + deref.pointee = null; + break :blk deref; + } + if (elem_ptr.index == check_len - 1) { + if (array_tv.ty.sentinel(mod)) |sent| { + deref.pointee = TypedValue{ + .ty = elem_ty, + .val = sent, + }; + break :blk deref; + } + } + deref.pointee = TypedValue{ + .ty = elem_ty, + .val = try array_tv.val.elemValue(mod, elem_ptr.index), + }; + break :blk deref; + }, + .field => |field_ptr| blk: { + const field_index = @intCast(u32, field_ptr.index); + const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); + + if (container_ty.hasWellDefinedLayout(mod)) { + const struct_obj = mod.typeToStruct(container_ty); + if (struct_obj != null and struct_obj.?.layout == .Packed) { + // packed structs are not byte addressable + deref.parent = null; + } else if (deref.parent) |*parent| { + // Update the byte offset (in-place) + try sema.resolveTypeLayout(container_ty); + const field_offset = container_ty.structFieldOffset(field_index, mod); + parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); + } + } else { + deref.parent = null; + deref.ty_without_well_defined_layout = container_ty; + } + + const tv = deref.pointee orelse { + deref.pointee = null; + break :blk deref; + }; + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, 
src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (!coerce_in_mem_ok) { + deref.pointee = null; + break :blk deref; + } + + if (container_ty.isSlice(mod)) { + const slice_val = tv.val.castTag(.slice).?.data; + deref.pointee = switch (field_index) { + Value.Payload.Slice.ptr_index => TypedValue{ + .ty = container_ty.slicePtrFieldType(mod), + .val = slice_val.ptr, + }, + Value.Payload.Slice.len_index => TypedValue{ + .ty = Type.usize, + .val = slice_val.len, + }, + else => unreachable, + }; + } else { + const field_ty = container_ty.structFieldType(field_index, mod); + deref.pointee = TypedValue{ + .ty = field_ty, + .val = try tv.val.fieldValue(tv.ty, mod, field_index), + }; + } + break :blk deref; + }, }, else => unreachable, }, @@ -28559,11 +28731,12 @@ fn coerceArrayPtrToSlice( if (try sema.resolveMaybeUndefVal(inst)) |val| { const ptr_array_ty = sema.typeOf(inst); const array_ty = ptr_array_ty.childType(mod); - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = val, - .len = try mod.intValue(Type.usize, array_ty.arrayLen(mod)), - }); - return sema.addConstant(dest_ty, slice_val); + const slice_val = try mod.intern(.{ .ptr = .{ + .ty = dest_ty.ip_index, + .addr = mod.intern_pool.indexToKey(val.ip_index).ptr.addr, + .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).ip_index, + } }); + return sema.addConstant(dest_ty, slice_val.toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.array_to_slice, dest_ty, inst); @@ -29769,6 +29942,7 @@ fn analyzeSlice( const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src); + const new_ptr_ty = sema.typeOf(new_ptr); // true if and only if the end index of the slice, implicitly or explicitly, equals // the length of the underlying object being sliced. 
we might learn the length of the @@ -29914,7 +30088,7 @@ fn analyzeSlice( const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod); + const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sema.arena, sentinel_index, sema.mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -29960,7 +30134,7 @@ fn analyzeSlice( try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); - const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo(mod); + const new_ptr_ty_info = new_ptr_ty.ptrInfo(mod); const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { @@ -30009,7 +30183,11 @@ fn analyzeSlice( }; if (!new_ptr_val.isUndef(mod)) { - return sema.addConstant(return_ty, new_ptr_val); + return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced( + mod.gpa, + try new_ptr_val.intern(new_ptr_ty, mod), + return_ty.ip_index, + )).toValue()); } // Special case: @as([]i32, undefined)[x..x] diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0f7a61f1bfbb..26d03c9be09a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3374,9 +3374,15 @@ pub const DeclGen = struct { val; break :ptr addrspace_casted_ptr; }, - .decl => |decl| try lowerDeclRefValue(dg, tv, decl), - .mut_decl => |mut_decl| try lowerDeclRefValue(dg, tv, mut_decl.decl), + .decl => |decl| try dg.lowerDeclRefValue(tv, decl), + .mut_decl => |mut_decl| try dg.lowerDeclRefValue(tv, mut_decl.decl), .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0), + .comptime_field => unreachable, }; switch (ptr.len) { .none => return ptr_val, @@ -4091,6 +4097,132 @@ pub const DeclGen = struct { .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), + .eu_payload => |eu_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(eu_ptr.toValue(), true); + + const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); + const payload_ty = eu_ty.errorUnionPayload(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + // In this case, we represent pointer to error union the same as pointer + // to the payload. 
+ return parent_llvm_ptr; + } + + const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; + const llvm_u32 = dg.context.intType(32); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(payload_offset, .False), + }; + const eu_llvm_ty = try dg.lowerType(eu_ty); + return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .opt_payload => |opt_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(opt_ptr.toValue(), true); + + const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); + const payload_ty = opt_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or + payload_ty.optionalReprIsPayload(mod)) + { + // In this case, we represent pointer to optional the same as pointer + // to the payload. + return parent_llvm_ptr; + } + + const llvm_u32 = dg.context.intType(32); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(0, .False), + }; + const opt_llvm_ty = try dg.lowerType(opt_ty); + return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .comptime_field => unreachable, + .elem => |elem_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.base.toValue(), true); + + const llvm_usize = try dg.lowerType(Type.usize); + const indices: [1]*llvm.Value = .{ + llvm_usize.constInt(elem_ptr.index, .False), + }; + const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().childType(mod)); + return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .field => |field_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); + const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + + const field_index = @intCast(u32, field_ptr.index); + const llvm_u32 = dg.context.intType(32); + switch (parent_ty.zigTypeTag(mod)) { + .Union => { + if (parent_ty.containerLayout(mod) == .Packed) { + return parent_llvm_ptr; + } + + const layout = parent_ty.unionGetLayout(mod); + if (layout.payload_size == 0) { + // In this case a pointer to the union and a pointer to any + // (void) payload is the same. 
+ return parent_llvm_ptr; + } + const llvm_pl_index = if (layout.tag_size == 0) + 0 + else + @boolToInt(layout.tag_align >= layout.payload_align); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(llvm_pl_index, .False), + }; + const parent_llvm_ty = try dg.lowerType(parent_ty); + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .Struct => { + if (parent_ty.containerLayout(mod) == .Packed) { + if (!byte_aligned) return parent_llvm_ptr; + const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth()); + const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); + // count bits of fields before this one + const prev_bits = b: { + var b: usize = 0; + for (parent_ty.structFields(mod).values()[0..field_index]) |field| { + if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + b += @intCast(usize, field.ty.bitSize(mod)); + } + break :b b; + }; + const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); + const field_addr = base_addr.constAdd(byte_offset); + const final_llvm_ty = dg.context.pointerType(0); + return field_addr.constIntToPtr(final_llvm_ty); + } + + const parent_llvm_ty = try dg.lowerType(parent_ty); + if (llvmField(parent_ty, field_index, mod)) |llvm_field| { + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(llvm_field.index, .False), + }; + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + } else { + const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); + const indices: [1]*llvm.Value = .{llvm_index}; + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + } + }, + .Pointer => { + assert(parent_ty.isSlice(mod)); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(field_index, .False), + }; + const parent_llvm_ty = try dg.lowerType(parent_ty); + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + else => unreachable, + } + }, }, else => unreachable, }; diff --git a/src/print_zir.zig b/src/print_zir.zig index a2178bbb49fb..6c371b8b8d1d 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -1192,7 +1192,7 @@ const Writer = struct { .field => { const field_name = self.code.nullTerminatedString(extra.data.field_name_start); try self.writeInstRef(stream, extra.data.obj_ptr); - try stream.print(", {}", .{std.zig.fmtId(field_name)}); + try stream.print(", \"{}\"", .{std.zig.fmtEscapes(field_name)}); }, } try stream.writeAll(", ["); diff --git a/src/value.zig b/src/value.zig index d17e14d14649..5963b84b25fc 100644 --- a/src/value.zig +++ b/src/value.zig @@ -559,37 +559,46 @@ pub const Value = struct { /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { - switch (val.tag()) { - .bytes => { - const bytes = val.castTag(.bytes).?.data; - const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null); - const adjusted_bytes = bytes[0..adjusted_len]; - return allocator.dupe(u8, adjusted_bytes); - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return allocator.dupe(u8, bytes); - }, - .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), - .repeated => { - const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); - @memset(result, byte); - return result; - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - const decl_val = try decl.value(); - return decl_val.toAllocatedBytes(decl.ty, allocator, mod); + switch (val.ip_index) { + .none => switch (val.tag()) { + .bytes => { + const bytes = val.castTag(.bytes).?.data; + const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null); + const adjusted_bytes = bytes[0..adjusted_len]; + return allocator.dupe(u8, adjusted_bytes); + }, + .str_lit => { + const str_lit = val.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + return allocator.dupe(u8, bytes); + }, + .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), + .repeated => { + const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + @memset(result, byte); + return result; + }, + .decl_ref => { + const decl_index = val.castTag(.decl_ref).?.data; + const decl = mod.declPtr(decl_index); + const decl_val = try decl.value(); + return decl_val.toAllocatedBytes(decl.ty, allocator, mod); + }, + .the_only_possible_value => return &[_]u8{}, + .slice => { + const slice = val.castTag(.slice).?.data; + return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); + }, + else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), }, - .the_only_possible_value => return &[_]u8{}, - .slice => { - const slice = val.castTag(.slice).?.data; - return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.len) { + .none => unreachable, + else => return arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + }, + else => unreachable, }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), } } @@ -605,6 +614,16 @@ pub const Value = struct { pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index); switch (val.tag()) { + .elem_ptr => { + const pl = val.castTag(.elem_ptr).?.data; + return mod.intern(.{ .ptr = .{ + .ty = ty.ip_index, + .addr = .{ .elem = .{ + .base = pl.array_ptr.ip_index, + .index = pl.index, + } }, + } }); + }, .slice => { const pl = val.castTag(.slice).?.data; const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod); @@ -2601,7 +2620,10 @@ pub const Value = struct { .@"var" => unreachable, .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), 
.mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), - .int => unreachable, + .int, .eu_payload, .opt_payload => unreachable, + .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), + .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), + .field => unreachable, }, .aggregate => |aggregate| switch (aggregate.storage) { .elems => |elems| elems[index].toValue(), From 9584feae5f27a8b987975d8fe8242e2169098a75 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 21 May 2023 00:43:19 -0400 Subject: [PATCH 085/205] InternPool: fix logic bugs --- src/InternPool.zig | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 1c59aae39e5c..652a7ecdea92 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2415,9 +2415,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(ptr_type.elem_type != .none); if (ptr_type.size == .Slice) { + _ = ip.map.pop(); var new_key = key; new_key.ptr_type.size = .Many; const ptr_type_index = try get(ip, gpa, new_key); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ .tag = .type_slice, @@ -2737,10 +2739,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), }), .elem, .field => |base_index| { + _ = ip.map.pop(); const index_index = try get(ip, gpa, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = base_index.index }, } }); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ .tag = .ptr_elem, @@ -2755,11 +2759,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, else => { assert(ptr_type.size == .Slice); + _ = ip.map.pop(); var new_key = key; new_key.ptr.ty = ip.slicePtrType(ptr.ty); new_key.ptr.len = .none; assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many); const ptr_index = try get(ip, gpa, new_key); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ .tag = .ptr_slice, @@ -3148,7 +3154,6 @@ pub fn getIncompleteEnum( gpa: Allocator, enum_type: Key.IncompleteEnumType, ) Allocator.Error!InternPool.IncompleteEnumType { - try ip.items.ensureUnusedCapacity(gpa, 1); switch (enum_type.tag_mode) { .auto => return getIncompleteEnumAuto(ip, gpa, enum_type), .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit), @@ -3180,6 +3185,7 @@ pub fn getIncompleteEnumAuto( const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len; try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len); + try ip.items.ensureUnusedCapacity(gpa, 1); const extra_index = ip.addExtraAssumeCapacity(EnumAuto{ .decl = enum_type.decl, @@ -3227,6 +3233,7 @@ fn getIncompleteEnumExplicit( const extra_fields_len: u32 = @typeInfo(EnumExplicit).Struct.fields.len; try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + reserved_len); + try ip.items.ensureUnusedCapacity(gpa, 1); const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{ .decl = enum_type.decl, From be1b23120648dff5e23d67b089c10b479564bffd Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 21 May 2023 03:57:12 -0400 Subject: [PATCH 086/205] InternPool: add missing logic --- src/InternPool.zig | 45 +++++++++++++++++++++++++++++++++++++---- 
src/Sema.zig | 19 +++++++++--------- src/value.zig | 50 ++++++++++++++++++++++++++++++---------------- 3 files changed, 84 insertions(+), 30 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 652a7ecdea92..ecc7b7a38320 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1517,6 +1517,8 @@ pub const Tag = enum(u8) { /// 0. name: NullTerminatedString for each names_len pub const ErrorSet = struct { names_len: u32, + /// Maps error names to declaration index. + names_map: MapIndex, }; /// Trailing: @@ -2024,6 +2026,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { const names = ip.extra.items[error_set.end..][0..names_len]; return .{ .error_set_type = .{ .names = @ptrCast([]const NullTerminatedString, names), + .names_map = error_set.data.names_map.toOptional(), } }; }, .type_inferred_error_set => .{ @@ -2518,6 +2521,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .tag = .type_error_set, .data = ip.addExtraAssumeCapacity(ErrorSet{ .names_len = names_len, + .names_map = names_map, }), }); ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, error_set_type.names)); @@ -3605,15 +3609,34 @@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// * int <=> int /// * int <=> enum /// * ptr <=> ptr +/// * null_value => opt +/// * payload => opt pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { - if (ip.typeOf(val) == new_ty) return val; + const old_ty = ip.typeOf(val); + if (old_ty == new_ty) return val; switch (ip.indexToKey(val)) { .int => |int| switch (ip.indexToKey(new_ty)) { + .simple_type => |simple_type| switch (simple_type) { + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + => return getCoercedInts(ip, gpa, int, new_ty), + else => {}, + }, + .int_type => return getCoercedInts(ip, gpa, int, new_ty), .enum_type => return ip.get(gpa, .{ .enum_tag = .{ .ty = new_ty, .int = val, } }), - else => return getCoercedInts(ip, gpa, int, new_ty), + else => {}, }, .enum_tag => |enum_tag| { // Assume new_ty is an integer type. @@ -3624,10 +3647,24 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .ty = new_ty, .addr = ptr.addr, } }), - else => unreachable, + else => {}, }, - else => unreachable, + else => {}, + } + switch (ip.indexToKey(new_ty)) { + .opt_type => |child_ty| switch (val) { + .null_value => return ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = .none, + } }), + else => return ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = try ip.getCoerced(gpa, val, child_ty), + } }), + }, + else => {}, } + unreachable; } /// Asserts `val` has an integer type. 
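A minimal sketch of how the coercion paths added above compose — illustrative
only, not part of the diff. It assumes an `ip: *InternPool` and a
`gpa: Allocator` in scope; `x` (an interned integer value) and `opt_u32_ty`
(an interned `?u32` type) are hypothetical indices:

    // int -> int: coercions between integer types (including the C integer
    // simple types) now go through getCoercedInts.
    const x_u32 = try ip.getCoerced(gpa, x, .u32_type);
    // value -> optional: falls through to the new trailing switch, which
    // wraps the payload (coerced to the child type) as an .opt.
    const some_x = try ip.getCoerced(gpa, x_u32, opt_u32_ty);
    // .null_value -> optional: becomes that optional's null (.val = .none).
    const no_x = try ip.getCoerced(gpa, .null_value, opt_u32_ty);
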
diff --git a/src/Sema.zig b/src/Sema.zig index e109da79ded8..bc74e5b2dbfe 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8480,13 +8480,10 @@ fn zirOptionalPayload( }; if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.isNull(mod)) { - return sema.fail(block, src, "unable to unwrap null", .{}); - } - if (val.castTag(.opt_payload)) |payload| { - return sema.addConstant(result_ty, payload.data); - } - return sema.addConstant(result_ty, val); + return if (val.optionalValue(mod)) |payload| + sema.addConstant(result_ty, payload) + else + sema.fail(block, src, "unable to unwrap null", .{}); } try sema.requireRuntimeBlock(block, src, null); @@ -18929,7 +18926,7 @@ fn zirReify( if (ptr_size == .One or ptr_size == .C) { return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); } - const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data; + const sentinel_ptr_val = sentinel_val.optionalValue(mod).?; const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, .pointee_type = elem_ty, @@ -28807,7 +28804,11 @@ fn coerceCompatiblePtrs( return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } // The comptime Value representation is compatible with both types. - return sema.addConstant(dest_ty, val); + return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced( + mod.gpa, + try val.intern(inst_ty, mod), + dest_ty.ip_index, + )).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod); diff --git a/src/value.zig b/src/value.zig index 5963b84b25fc..0277f0bdb23b 100644 --- a/src/value.zig +++ b/src/value.zig @@ -718,18 +718,25 @@ pub const Value = struct { }, else => unreachable, }; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - return enum_type.values[field_index].toValue(); - } else { - // Field index and integer values are the same. - return mod.intValue(enum_type.tag_ty.toType(), field_index); - } + return switch (ip.indexToKey(ty.ip_index)) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + enum_type.values[field_index].toValue() + else // Field index and integer values are the same. + mod.intValue(enum_type.tag_ty.toType(), field_index), + else => unreachable, + }; }, - else => { - const enum_type = ip.indexToKey(ip.typeOf(val.ip_index)).enum_type; - const int = try ip.getCoerced(mod.gpa, val.ip_index, enum_type.tag_ty); - return int.toValue(); + else => return switch (ip.indexToKey(ip.typeOf(val.ip_index))) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| (try ip.getCoerced( + mod.gpa, + val.ip_index, + enum_type.tag_ty, + )).toValue(), + else => unreachable, }, } } @@ -2906,7 +2913,7 @@ pub const Value = struct { inline .u64, .i64 => |x| x == 0, }, .opt => |opt| opt.val == .none, - else => unreachable, + else => false, }, }; } @@ -2949,11 +2956,20 @@ pub const Value = struct { /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - if (val.isNull(mod)) return null; - - // Valid for optional representation to be the direct value - // and not use opt_payload. 
- return if (val.castTag(.opt_payload)) |p| p.data else val; + return switch (val.ip_index) { + .none => if (val.isNull(mod)) null + // Valid for optional representation to be the direct value + // and not use opt_payload. + else if (val.castTag(.opt_payload)) |p| p.data else val, + .null_value => null, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .opt => |opt| switch (opt.val) { + .none => null, + else => opt.val.toValue(), + }, + else => unreachable, + }, + }; } /// Valid for all types. Asserts the value is not undefined. From 84099e50fc0af3719f3819c6c2d37dedba1aaae4 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 21 May 2023 03:57:23 -0400 Subject: [PATCH 087/205] TypedValue: fix debug print crashes --- src/TypedValue.zig | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 144b7ebf9d48..be535ad324bf 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -295,6 +295,9 @@ pub fn print( }, .eu_payload_ptr => { try writer.writeAll("&"); + if (level == 0) { + return writer.writeAll("(ptr)"); + } const data = val.castTag(.eu_payload_ptr).?.data; @@ -320,6 +323,10 @@ pub fn print( return; }, .opt_payload_ptr => { + if (level == 0) { + return writer.writeAll("&(ptr)"); + } + const data = val.castTag(.opt_payload_ptr).?.data; var ty_val: Value.Payload.Ty = .{ @@ -359,6 +366,10 @@ pub fn print( inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), }, .enum_tag => |enum_tag| { + if (level == 0) { + return writer.writeAll("(enum)"); + } + try writer.writeAll("@intToEnum("); try print(.{ .ty = Type.type, From a6fcf469fc8982d36597586ac0dbba94027b2429 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 21 May 2023 04:29:34 -0400 Subject: [PATCH 088/205] Value: remove legacy type values --- src/Module.zig | 12 ++++++------ src/Sema.zig | 22 +++++++++++----------- src/TypedValue.zig | 15 ++------------- src/link.zig | 2 +- src/value.zig | 12 ++---------- 5 files changed, 22 insertions(+), 41 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 832368c0d049..f4bf48712896 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -841,16 +841,16 @@ pub const Decl = struct { pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; - const ty = (decl.val.castTag(.ty) orelse return .none).data; - return mod.intern_pool.indexToStructType(ty.ip_index); + if (decl.val.ip_index == .none) return .none; + return mod.intern_pool.indexToStructType(decl.val.ip_index); } /// If the Decl has a value and it is a union, return it, /// otherwise null. 
pub fn getUnion(decl: *Decl, mod: *Module) ?*Union { if (!decl.owns_tv) return null; - const ty = (decl.val.castTag(.ty) orelse return null).data; - return mod.typeToUnion(ty); + if (decl.val.ip_index == .none) return null; + return mod.typeToUnion(decl.val.toType()); } /// If the Decl has a value and it is a function, return it, @@ -4695,8 +4695,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } - decl.ty = Type.type; - decl.val = try Value.Tag.ty.create(decl_arena_allocator, ty); + decl.ty = InternPool.Index.type_type.toType(); + decl.val = ty.toValue(); decl.@"align" = 0; decl.@"linksection" = null; decl.has_tv = true; diff --git a/src/Sema.zig b/src/Sema.zig index bc74e5b2dbfe..cdf02e7c724a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6131,7 +6131,7 @@ fn lookupInNamespace( continue; } try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index); - const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data; + const ns_ty = sub_usingnamespace_decl.val.toType(); const sub_ns = ns_ty.getNamespace(mod).?; try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod)); } @@ -16131,7 +16131,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // address_space: AddressSpace try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace")), // child: type, - try Value.Tag.ty.create(sema.arena, info.pointee_type), + info.pointee_type.toValue(), // is_allowzero: bool, Value.makeBool(info.@"allowzero"), // sentinel: ?*const anyopaque, @@ -16152,7 +16152,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // len: comptime_int, field_values[0] = try mod.intValue(Type.comptime_int, info.len); // child: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); + field_values[1] = info.elem_type.toValue(); // sentinel: ?*const anyopaque, field_values[2] = try sema.optRefValue(block, info.elem_type, info.sentinel); @@ -16170,7 +16170,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // len: comptime_int, field_values[0] = try mod.intValue(Type.comptime_int, info.len); // child: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); + field_values[1] = info.elem_type.toValue(); return sema.addConstant( type_info_ty, @@ -16183,7 +16183,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Optional => { const field_values = try sema.arena.alloc(Value, 1); // child: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, ty.optionalChild(mod)); + field_values[0] = ty.optionalChild(mod).toValue(); return sema.addConstant( type_info_ty, @@ -16286,9 +16286,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ErrorUnion => { const field_values = try sema.arena.alloc(Value, 2); // error_set: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet(mod)); + field_values[0] = ty.errorUnionSet(mod).toValue(); // payload: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload(mod)); + field_values[1] = ty.errorUnionPayload(mod).toValue(); return sema.addConstant( type_info_ty, @@ -16436,7 +16436,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty), + field.ty.toValue(), 
// alignment: comptime_int, try mod.intValue(Type.comptime_int, alignment), }; @@ -16465,7 +16465,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); const enum_tag_ty_val = if (union_ty.unionTagType(mod)) |tag_ty| v: { - const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty); + const ty_val = tag_ty.toValue(); break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); } else Value.null; @@ -16602,7 +16602,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty), + field.ty.toValue(), // default_value: ?*const anyopaque, try default_val_ptr.copy(fields_anon_decl.arena()), // is_comptime: bool, @@ -16641,7 +16641,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); - const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty); + const backing_int_ty_val = struct_obj.backing_int_ty.toValue(); break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); } else { break :blk Value.null; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index be535ad324bf..569c1430d5b6 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -103,7 +103,6 @@ pub fn print( return writer.writeAll(" }"); }, .the_only_possible_value => return writer.writeAll("0"), - .ty => return val.castTag(.ty).?.data.print(writer, mod), .lazy_align => { const sub_ty = val.castTag(.lazy_align).?.data; const x = sub_ty.abiAlignment(mod); @@ -301,15 +300,10 @@ pub fn print( const data = val.castTag(.eu_payload_ptr).?.data; - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - try writer.writeAll("@as("); try print(.{ .ty = Type.type, - .val = Value.initPayload(&ty_val.base), + .val = ty.toValue(), }, writer, level - 1, mod); try writer.writeAll(", &(payload of "); @@ -329,15 +323,10 @@ pub fn print( const data = val.castTag(.opt_payload_ptr).?.data; - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - try writer.writeAll("@as("); try print(.{ .ty = Type.type, - .val = Value.initPayload(&ty_val.base), + .val = ty.toValue(), }, writer, level - 1, mod); try writer.writeAll(", &(payload of "); diff --git a/src/link.zig b/src/link.zig index ac764f06f842..1f34b0f760a3 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1124,7 +1124,7 @@ pub const File = struct { pub fn initDecl(kind: Kind, decl: ?Module.Decl.Index, mod: *Module) LazySymbol { return .{ .kind = kind, .ty = if (decl) |decl_index| - mod.declPtr(decl_index).val.castTag(.ty).?.data + mod.declPtr(decl_index).val.toType() else Type.anyerror }; } diff --git a/src/value.zig b/src/value.zig index 0277f0bdb23b..b1c94d46b5e2 100644 --- a/src/value.zig +++ b/src/value.zig @@ -39,7 +39,6 @@ pub const Value = struct { empty_array, // See last_no_payload_tag below. // After this, the tag requires a payload. 
- ty, function, extern_fn, /// A comptime-known pointer can point to the address of a global @@ -141,7 +140,6 @@ pub const Value = struct { .str_lit => Payload.StrLit, .slice => Payload.Slice, - .ty, .lazy_align, .lazy_size, => Payload.Ty, @@ -255,7 +253,7 @@ pub const Value = struct { .empty_array, => unreachable, - .ty, .lazy_align, .lazy_size => { + .lazy_align, .lazy_size => { const payload = self.cast(Payload.Ty).?; const new_payload = try arena.create(Payload.Ty); new_payload.* = .{ @@ -472,7 +470,6 @@ pub const Value = struct { return out_stream.writeAll("(union value)"); }, .the_only_possible_value => return out_stream.writeAll("(the only possible value)"), - .ty => return val.castTag(.ty).?.data.dump("", options, out_stream), .lazy_align => { try out_stream.writeAll("@alignOf("); try val.castTag(.lazy_align).?.data.dump("", options, out_stream); @@ -695,12 +692,7 @@ pub const Value = struct { /// Asserts that the value is representable as a type. pub fn toType(self: Value) Type { - if (self.ip_index != .none) return self.ip_index.toType(); - return switch (self.tag()) { - .ty => self.castTag(.ty).?.data, - - else => unreachable, - }; + return self.ip_index.toType(); } pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { From a7c3ca35312094bcfaecd80cdd2b95ba89386773 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 21 May 2023 19:22:50 -0400 Subject: [PATCH 089/205] InternPool: add lldb pretty printing for indices --- src/InternPool.zig | 128 +++++++++++++++++++++++++++++++++- src/codegen/llvm.zig | 4 +- tools/lldb_pretty_printers.py | 86 +++++++++++++++++++---- 3 files changed, 203 insertions(+), 15 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index ecc7b7a38320..c15e1afeaddc 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -56,6 +56,7 @@ string_table: std.HashMapUnmanaged( std.hash_map.default_max_load_percentage, ) = .{}, +const builtin = @import("builtin"); const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -1057,6 +1058,131 @@ pub const Index = enum(u32) { return std.hash.uint32(@enumToInt(a)); } }; + + /// This function is used in the debugger pretty formatters in tools/ to fetch the + /// Tag to encoding mapping to facilitate fancy debug printing for this type. 
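+    /// Nothing calls dbHelper at runtime; tools/lldb_pretty_printers.py looks
+    /// the function up by its qualified name and decodes its parameter types to
+    /// learn, for each `Tag`, whether `data` is an index, an immediate, or an
+    /// index into `extra`, and which trailing arrays follow in `extra`.
+    /// The `comptime` block at the end of this enum references dbHelper in
+    /// Debug builds so that it is analyzed and visible to the debugger.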
+ fn dbHelper(self: *Index, tag_to_encoding_map: *struct { + const DataIsIndex = struct { data: Index }; + const DataIsExtraIndexOfEnumExplicit = struct { + const @"data.fields_len" = opaque {}; + data: *EnumExplicit, + @"trailing.names.len": *@"data.fields_len", + @"trailing.values.len": *@"data.fields_len", + trailing: struct { + names: []NullTerminatedString, + values: []Index, + }, + }; + const DataIsExtraIndexOfTypeStructAnon = struct { + const @"data.fields_len" = opaque {}; + data: *TypeStructAnon, + @"trailing.types.len": *@"data.fields_len", + @"trailing.values.len": *@"data.fields_len", + @"trailing.names.len": *@"data.fields_len", + trailing: struct { + types: []Index, + values: []Index, + names: []NullTerminatedString, + }, + }; + + type_int_signed: struct { data: u32 }, + type_int_unsigned: struct { data: u32 }, + type_array_big: struct { data: *Array }, + type_array_small: struct { data: *Vector }, + type_vector: struct { data: *Vector }, + type_pointer: struct { data: *Pointer }, + type_slice: DataIsIndex, + type_optional: DataIsIndex, + type_anyframe: DataIsIndex, + type_error_union: struct { data: *Key.ErrorUnionType }, + type_error_set: struct { + const @"data.names_len" = opaque {}; + data: *ErrorSet, + @"trailing.names.len": *@"data.names_len", + trailing: struct { names: []NullTerminatedString }, + }, + type_inferred_error_set: struct { data: Module.Fn.InferredErrorSet.Index }, + type_enum_auto: struct { + const @"data.fields_len" = opaque {}; + data: *EnumAuto, + @"trailing.names.len": *@"data.fields_len", + trailing: struct { names: []NullTerminatedString }, + }, + type_enum_explicit: DataIsExtraIndexOfEnumExplicit, + type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit, + simple_type: struct { data: SimpleType }, + type_opaque: struct { data: *Key.OpaqueType }, + type_struct: struct { data: Module.Struct.OptionalIndex }, + type_struct_ns: struct { data: Module.Namespace.Index }, + type_struct_anon: DataIsExtraIndexOfTypeStructAnon, + type_tuple_anon: DataIsExtraIndexOfTypeStructAnon, + type_union_tagged: struct { data: Module.Union.Index }, + type_union_untagged: struct { data: Module.Union.Index }, + type_union_safety: struct { data: Module.Union.Index }, + type_function: struct { + const @"data.params_len" = opaque {}; + data: *TypeFunction, + @"trailing.param_types.len": *@"data.params_len", + trailing: struct { param_types: []Index }, + }, + + undef: DataIsIndex, + simple_value: struct { data: SimpleValue }, + ptr_var: struct { data: *PtrVar }, + ptr_mut_decl: struct { data: *PtrMutDecl }, + ptr_decl: struct { data: *PtrDecl }, + ptr_int: struct { data: *PtrInt }, + ptr_eu_payload: DataIsIndex, + ptr_opt_payload: DataIsIndex, + ptr_comptime_field: struct { data: *PtrComptimeField }, + ptr_elem: struct { data: *PtrBaseIndex }, + ptr_field: struct { data: *PtrBaseIndex }, + ptr_slice: struct { data: *PtrSlice }, + opt_payload: DataIsIndex, + opt_null: DataIsIndex, + int_u8: struct { data: u8 }, + int_u16: struct { data: u16 }, + int_u32: struct { data: u32 }, + int_i32: struct { data: i32 }, + int_usize: struct { data: u32 }, + int_comptime_int_u32: struct { data: u32 }, + int_comptime_int_i32: struct { data: i32 }, + int_small: struct { data: *IntSmall }, + int_positive: struct { data: u32 }, + int_negative: struct { data: u32 }, + enum_tag: struct { data: *Key.EnumTag }, + float_f16: struct { data: f16 }, + float_f32: struct { data: f32 }, + float_f64: struct { data: *Float64 }, + float_f80: struct { data: *Float80 }, + float_f128: struct { data: *Float128 }, 
+ float_c_longdouble_f80: struct { data: *Float80 }, + float_c_longdouble_f128: struct { data: *Float128 }, + float_comptime_float: struct { data: *Float128 }, + extern_func: struct { data: void }, + func: struct { data: void }, + only_possible_value: DataIsIndex, + union_value: struct { data: *Key.Union }, + aggregate: struct { data: *Aggregate }, + repeated: struct { data: *Repeated }, + }) void { + _ = self; + @setEvalBranchQuota(10_000); + inline for (@typeInfo(Tag).Enum.fields) |tag| { + inline for (@typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields) |entry| { + if (comptime std.mem.eql(u8, tag.name, entry.name)) break; + } else { + @compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry"); + } + } + } + + comptime { + if (builtin.mode == .Debug) { + _ = dbHelper; + } + } }; pub const static_keys = [_]Key{ @@ -1776,7 +1902,7 @@ pub const PtrVar = struct { is_const: bool, is_threadlocal: bool, is_weak_linkage: bool, - unused: u29 = undefined, + _: u29 = 0, }; }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 26d03c9be09a..1eb6afd994ac 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4693,8 +4693,8 @@ pub const FuncGen = struct { } fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value { - const llvm_val = try self.dg.lowerValue(tv); const mod = self.dg.module; + const llvm_val = try self.dg.lowerValue(tv); if (!isByRef(tv.ty, mod)) return llvm_val; // We have an LLVM value but we need to create a global constant and @@ -4972,8 +4972,8 @@ pub const FuncGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const callee_ty = self.typeOf(pl_op.operand); const mod = self.dg.module; + const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => callee_ty.childType(mod), diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index e0b84e1b4155..6cccf77ee0e9 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -115,7 +115,7 @@ def get_child_index(self, name): try: return int(name.removeprefix('[').removesuffix(']')) except: return -1 def get_child_at_index(self, index): - if index < 0 or index >= self.len: return None + if index not in range(self.len): return None try: return self.ptr.CreateChildAtOffset('[%d]' % index, index * self.elem_size, self.elem_type) except: return None @@ -176,7 +176,7 @@ def num_children(self): return 1 + (self.payload is not None) def get_child_index(self, name): try: return ('tag', 'payload').index(name) except: return -1 - def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index >= 0 and index < 2 else None + def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None # Define Zig Standard Library @@ -196,7 +196,7 @@ def get_child_index(self, name): except: return -1 def get_child_at_index(self, index): try: - if index < 0 or index >= self.len: return None + if index not in range(self.len): return None prealloc_item_count = len(self.prealloc_segment) if index < prealloc_item_count: return self.prealloc_segment.child[index] prealloc_exp = prealloc_item_count.bit_length() - 1 @@ -231,7 +231,7 @@ def get_child_index(self, name): except: return -1 def get_child_at_index(self, index): try: - if 
index < 0 or index >= self.len: return None + if index not in range(self.len): return None offset = 0 data = lldb.SBData() for field in self.entry_type.fields: @@ -266,7 +266,7 @@ def get_child_index(self, name): except: return -1 def get_child_at_index(self, index): try: - if index < 0 or index >= self.len: return None + if index not in range(self.len): return None data = lldb.SBData() for field in self.entry_type.fields: field_type = field.type.GetPointeeType() @@ -328,7 +328,7 @@ def update(self): def has_children(self): return self.num_children() != 0 def num_children(self): return len(self.children) def get_child_index(self, name): return self.indices.get(name) - def get_child_at_index(self, index): return self.children[index].deref if index >= 0 and index < len(self.children) else None + def get_child_at_index(self, index): return self.children[index].deref if index in range(len(self.children)) else None # Define Zig Stage2 Compiler @@ -345,7 +345,7 @@ def num_children(self): return 2 def get_child_index(self, name): try: return ('tag', 'payload').index(name) except: return -1 - def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index >= 0 and index < 2 else None + def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None def Inst_Ref_SummaryProvider(value, _=None): members = value.type.enum_members @@ -392,7 +392,7 @@ def num_children(self): return 1 + (self.payload is not None) def get_child_index(self, name): try: return ('tag', 'payload').index(name) except: return -1 - def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index >= 0 and index < 2 else None + def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None def Module_Decl_name(decl): error = lldb.SBError() @@ -407,6 +407,71 @@ def Module_Decl_RenderFullyQualifiedName(decl): return '.'.join((Module_Namespac def OwnerDecl_RenderFullyQualifiedName(payload): return Module_Decl_RenderFullyQualifiedName(payload.GetChildMemberWithName('owner_decl').GetChildMemberWithName('decl')) +def InternPool_Find(thread): + for frame in thread: + ip = frame.FindVariable('ip') or frame.FindVariable('intern_pool') + if ip: return ip + mod = frame.FindVariable('mod') or frame.FindVariable('module') + if mod: + ip = mod.GetChildMemberWithName('intern_pool') + if ip: return ip + +class InternPool_Index_SynthProvider: + def __init__(self, value, _=None): self.value = value + def update(self): + try: + index_type = self.value.type + for helper in self.value.target.FindFunctions('%s.dbHelper' % index_type.name, lldb.eFunctionNameTypeFull): + ptr_self_type, ptr_tag_to_encoding_map_type = helper.function.type.GetFunctionArgumentTypes() + if ptr_self_type.GetPointeeType() == index_type: break + else: return + tag_to_encoding_map = {field.name: field.type for field in ptr_tag_to_encoding_map_type.GetPointeeType().fields} + + ip = InternPool_Find(self.value.thread) + if not ip: return + self.item = ip.GetChildMemberWithName('items').GetChildAtIndex(self.value.unsigned) + extra = ip.GetChildMemberWithName('extra').GetChildMemberWithName('items') + self.tag = self.item.GetChildMemberWithName('tag').Clone('tag') + self.data = None + self.trailing = None + data = self.item.GetChildMemberWithName('data') + encoding_type = tag_to_encoding_map[self.tag.value] + dynamic_values = {} + for encoding_field in encoding_type.fields: + if encoding_field.name == 'data': + if encoding_field.type.IsPointerType(): + 
data_type = encoding_field.type.GetPointeeType() + extra_index = data.unsigned + self.data = extra.GetChildAtIndex(extra_index).Cast(data_type).Clone('data') + extra_index += data_type.num_fields + else: + self.data = data.Cast(encoding_field.type).Clone('data') + elif encoding_field.name == 'trailing': + trailing_data = lldb.SBData() + for trailing_field in encoding_field.type.fields: + if trailing_field.type.IsAggregateType(): + trailing_data.Append(extra.GetChildAtIndex(extra_index).address_of.data) + len = dynamic_values['trailing.%s.len' % trailing_field.name].unsigned + trailing_data.Append(lldb.SBData.CreateDataFromInt(len, trailing_data.GetAddressByteSize())) + extra_index += len + else: + pass + self.trailing = self.data.CreateValueFromData('trailing', trailing_data, encoding_field.type) + else: + path = encoding_field.type.GetPointeeType().name.removeprefix('%s::' % encoding_type.name).removeprefix('%s.' % encoding_type.name).partition('__')[0].split('.') + if path[0] == 'data': + dynamic_value = self.data + for name in path[1:]: + dynamic_value = dynamic_value.GetChildMemberWithName(name) + dynamic_values[encoding_field.name] = dynamic_value + except: pass + def has_children(self): return True + def num_children(self): return 2 + (self.trailing is not None) + def get_child_index(self, name): + try: return ('tag', 'data', 'trailing').index(name) + except: return -1 + def get_child_at_index(self, index): return (self.tag, self.data, self.trailing)[index] if index in range(3) else None + def type_Type_pointer(payload): pointee_type = payload.GetChildMemberWithName('pointee_type') sentinel = payload.GetChildMemberWithName('sentinel').GetChildMemberWithName('child') @@ -616,8 +681,5 @@ def __lldb_init_module(debugger, _=None): add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True) add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True) add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True) - add(debugger, category='zig.stage2', type='type.Type', identifier='TagOrPayloadPtr', synth=True) - add(debugger, category='zig.stage2', type='type.Type', summary=True) - add(debugger, category='zig.stage2', type='value.Value', identifier='TagOrPayloadPtr', synth=True) - add(debugger, category='zig.stage2', type='value.Value', summary=True) + add(debugger, category='zig.stage2', type='InternPool.Index', synth=True) add(debugger, category='zig.stage2', type='arch.x86_64.CodeGen.MCValue', identifier='zig_TaggedUnion', synth=True, inline_children=True, summary=True) From 484c3e8cbcbd206023468cc16783498bbb662a2c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 21 May 2023 19:22:54 -0400 Subject: [PATCH 090/205] llvm: fix incorrect slice lowering --- src/codegen/llvm.zig | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 1eb6afd994ac..8dec958806a3 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3359,7 +3359,11 @@ pub const DeclGen = struct { else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { .int => |int| return dg.lowerIntAsPtr(int), .ptr => |ptr| { - const ptr_val = switch (ptr.addr) { + const ptr_tv: TypedValue = switch (ptr.len) { + .none => tv, + else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, + }; + const llvm_ptr_val = switch 
(ptr.addr) { .@"var" => |@"var"| ptr: { const decl = dg.module.declPtr(@"var".owner_decl); dg.module.markDeclAlive(decl); @@ -3374,21 +3378,21 @@ pub const DeclGen = struct { val; break :ptr addrspace_casted_ptr; }, - .decl => |decl| try dg.lowerDeclRefValue(tv, decl), - .mut_decl => |mut_decl| try dg.lowerDeclRefValue(tv, mut_decl.decl), + .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), + .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), .eu_payload, .opt_payload, .elem, .field, - => try dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0), + => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0), .comptime_field => unreachable, }; switch (ptr.len) { - .none => return ptr_val, + .none => return llvm_ptr_val, else => { const fields: [2]*llvm.Value = .{ - ptr_val, + llvm_ptr_val, try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), }; return dg.context.constStruct(&fields, fields.len, .False); From 1b64eed107f54220f85a5158699225b59899a20a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 23 May 2023 19:00:48 -0700 Subject: [PATCH 091/205] remove the kludges from std.builtin I added these in an earlier commit in this branch. This commit removes them before the branch is merged. --- lib/std/builtin.zig | 7 ------- src/Sema.zig | 51 ++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 48 insertions(+), 10 deletions(-) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 3e8970a354e7..ef93bb14ee14 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -223,13 +223,6 @@ pub const SourceLocation = struct { pub const TypeId = std.meta.Tag(Type); pub const TypeInfo = @compileError("deprecated; use Type"); -/// TODO this is a temporary alias because I don't see any handy methods in -/// Sema for accessing inner declarations. -pub const PtrSize = Type.Pointer.Size; -/// TODO this is a temporary alias because I don't see any handy methods in -/// Sema for accessing inner declarations. -pub const TmpContainerLayoutAlias = Type.ContainerLayout; - /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. 
pub const Type = union(enum) { diff --git a/src/Sema.zig b/src/Sema.zig index cdf02e7c724a..13a1c1cb3db7 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -16116,7 +16116,30 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try info.pointee_type.lazyAbiAlignment(mod, sema.arena); const addrspace_ty = try sema.getBuiltinType("AddressSpace"); - const ptr_size_ty = try sema.getBuiltinType("PtrSize"); + const pointer_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + "Pointer", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); + }; + const ptr_size_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + pointer_ty.getNamespaceIndex(mod).unwrap().?, + "Size", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); + }; const field_values = try sema.arena.create([8]Value); field_values.* = .{ @@ -16469,7 +16492,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); } else Value.null; - const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias"); + const container_layout_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + "ContainerLayout", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); + }; const field_values = try sema.arena.create([4]Value); field_values.* = .{ @@ -16648,7 +16682,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }; - const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias"); + const container_layout_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + "ContainerLayout", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); + }; const field_values = try sema.arena.create([5]Value); field_values.* = .{ From 01ca841f1227cc3e17169c45318c8d4757a5c0d2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 23 May 2023 19:10:36 -0700 Subject: [PATCH 092/205] Sema: improve the types_to_resolve mechanism Store `InternPool.Index` as the key instead which means that an AIR instruction no longer needs to be burned to store the type, and also that we can use AutoArrayHashMap instead of an ArrayList, which avoids storing duplicates into the set, potentially saving CPU time. --- src/Module.zig | 5 ++--- src/Sema.zig | 16 +++++++++------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index f4bf48712896..8174778f48ef 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5773,9 +5773,8 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // Similarly, resolve any queued up types that were requested to be resolved for // the backends. 
-    for (sema.types_to_resolve.items) |inst_ref| {
-        const ty = sema.getTmpAir().getRefType(inst_ref);
-        sema.resolveTypeFully(ty) catch |err| switch (err) {
+    for (sema.types_to_resolve.keys()) |ty| {
+        sema.resolveTypeFully(ty.toType()) catch |err| switch (err) {
             error.NeededSourceLocation => unreachable,
             error.GenericPoison => unreachable,
             error.ComptimeReturn => unreachable,
diff --git a/src/Sema.zig b/src/Sema.zig
index 13a1c1cb3db7..866c242f15ba 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -66,11 +66,14 @@ comptime_args_fn_inst: Zir.Inst.Index = 0,
 /// extra hash table lookup in the `monomorphed_funcs` set.
 /// Sema will set this to null when it takes ownership.
 preallocated_new_func: ?*Module.Fn = null,
-/// The key is `constant` AIR instructions to types that must be fully resolved
-/// after the current function body analysis is done.
-/// TODO: after upgrading to use InternPool change the key here to be an
-/// InternPool value index.
-types_to_resolve: std.ArrayListUnmanaged(Air.Inst.Ref) = .{},
+/// The keys are types that must be fully resolved before the machine code
+/// generation pass. Types are added to this set when resolving them
+/// immediately could cause a dependency loop, but they do need to be resolved
+/// before machine code generation passes process the AIR.
+/// It would work fine if this were an array list instead of an array hash map.
+/// I chose array hash map with the intention to save time by omitting
+/// duplicates.
+types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
 /// These are lazily created runtime blocks from block_inline instructions.
 /// They are created when a break_inline passes through a runtime condition, because
 /// Sema must convert comptime control flow to runtime control flow, which means
@@ -34085,8 +34088,7 @@ fn anonStructFieldIndex(
 }
 
 fn queueFullTypeResolution(sema: *Sema, ty: Type) !void {
-    const inst_ref = try sema.addType(ty);
-    try sema.types_to_resolve.append(sema.gpa, inst_ref);
+    try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {});
 }
 
 fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {

From 20522600399fb38e8e220cab0f8f59234725b9f1 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 23 May 2023 19:19:13 -0700
Subject: [PATCH 093/205] Sema: update zirSliceLength to avoid resolveInst(.none)

---
 src/Sema.zig | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index 866c242f15ba..7df6e44898a5 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -10088,7 +10088,7 @@ fn zirSliceLength(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const array_ptr = try sema.resolveInst(extra.lhs);
     const start = try sema.resolveInst(extra.start);
     const len = try sema.resolveInst(extra.len);
-    const sentinel = try sema.resolveInst(extra.sentinel);
+    const sentinel = if (extra.sentinel == .none) .none else try sema.resolveInst(extra.sentinel);
     const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node };
     const start_src: LazySrcLoc = .{ .node_offset_slice_start = extra.start_src_node_offset };
     const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node };
@@ -31997,17 +31997,14 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void {
             const child_ty = try sema.resolveTypeFields(ty.childType(mod));
             return sema.resolveTypeFully(child_ty);
         },
-        .Struct => switch (ty.ip_index) {
-            .none => {}, // TODO make this unreachable when all types are migrated to InternPool
-            else => switch
(mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => return sema.resolveStructFully(ty), - .anon_struct_type => |tuple| { - for (tuple.types) |field_ty| { - try sema.resolveTypeFully(field_ty.toType()); - } - }, - else => {}, + .Struct => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => return sema.resolveStructFully(ty), + .anon_struct_type => |tuple| { + for (tuple.types) |field_ty| { + try sema.resolveTypeFully(field_ty.toType()); + } }, + else => {}, }, .Union => return sema.resolveUnionFully(ty), .Array => return sema.resolveTypeFully(ty.childType(mod)), @@ -32096,8 +32093,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { switch (ty.ip_index) { .var_args_param_type => unreachable, - // TODO: After the InternPool transition is complete, change this to `unreachable`. - .none => return ty, + .none => unreachable, .u1_type, .u8_type, From 5555bdca047f8dbf8d7adfa8f248f5ce9b692b9e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 23 May 2023 19:49:18 -0700 Subject: [PATCH 094/205] InternPool: support int->comptime_int in getCoerced --- src/InternPool.zig | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/InternPool.zig b/src/InternPool.zig index c15e1afeaddc..d19cc3d647df 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3754,6 +3754,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .c_ulong, .c_longlong, .c_ulonglong, + .comptime_int, => return getCoercedInts(ip, gpa, int, new_ty), else => {}, }, @@ -3790,6 +3791,11 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al }, else => {}, } + if (std.debug.runtime_safety) { + std.debug.panic("val={any} new_ty={any}\n", .{ + ip.items.get(@enumToInt(val)), ip.items.get(@enumToInt(new_ty)), + }); + } unreachable; } From 6e0de1d11694a58745da76d601ebab7562feed09 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 22 May 2023 07:58:02 -0400 Subject: [PATCH 095/205] InternPool: port most of value tags --- lib/std/array_list.zig | 44 + src/Air.zig | 6 +- src/AstGen.zig | 30 +- src/Compilation.zig | 5 +- src/InternPool.zig | 827 ++++++-- src/Module.zig | 409 ++-- src/Sema.zig | 2882 +++++++++++++-------------- src/TypedValue.zig | 245 +-- src/Zir.zig | 4 +- src/arch/aarch64/CodeGen.zig | 17 +- src/arch/arm/CodeGen.zig | 12 +- src/arch/riscv64/CodeGen.zig | 11 +- src/arch/sparc64/CodeGen.zig | 11 +- src/arch/wasm/CodeGen.zig | 351 ++-- src/arch/x86_64/CodeGen.zig | 65 +- src/codegen.zig | 1035 ++++------ src/codegen/c.zig | 957 +++++---- src/codegen/llvm.zig | 1608 +++++++-------- src/codegen/spirv.zig | 312 +-- src/link.zig | 19 +- src/link/C.zig | 10 +- src/link/Coff.zig | 18 +- src/link/Dwarf.zig | 8 +- src/link/Elf.zig | 18 +- src/link/MachO.zig | 28 +- src/link/NvPtx.zig | 4 +- src/link/Plan9.zig | 14 +- src/link/SpirV.zig | 6 +- src/link/Wasm.zig | 40 +- src/print_air.zig | 4 +- src/type.zig | 463 +++-- src/value.zig | 1775 ++++------------- tools/lldb_pretty_printers.py | 6 +- tools/stage2_gdb_pretty_printers.py | 2 +- 34 files changed, 5236 insertions(+), 6010 deletions(-) diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index bbfa588d6dc7..c2a2486dfa9b 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -459,6 +459,28 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { return self.items[prev_len..][0..n]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. 
+    /// The return value is a slice pointing to the newly allocated elements.
+    /// The returned pointer becomes invalid when the list is resized.
+    /// Resizes list if `self.capacity` is not large enough.
+    pub fn addManyAsSlice(self: *Self, n: usize) Allocator.Error![]T {
+        const prev_len = self.items.len;
+        try self.resize(self.items.len + n);
+        return self.items[prev_len..][0..n];
+    }
+
+    /// Resize the array, adding `n` new elements, which have `undefined` values.
+    /// The return value is a slice pointing to the newly allocated elements.
+    /// Asserts that there is already space for the new items without allocating more.
+    /// **Does not** invalidate element pointers.
+    /// The returned pointer becomes invalid when the list is resized.
+    pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
+        assert(self.items.len + n <= self.capacity);
+        const prev_len = self.items.len;
+        self.items.len += n;
+        return self.items[prev_len..][0..n];
+    }
+
     /// Remove and return the last element from the list.
     /// Asserts the list has at least one item.
     /// Invalidates pointers to the removed element.
@@ -949,6 +971,28 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
         return self.items[prev_len..][0..n];
     }
 
+    /// Resize the array, adding `n` new elements, which have `undefined` values.
+    /// The return value is a slice pointing to the newly allocated elements.
+    /// The returned pointer becomes invalid when the list is resized.
+    /// Resizes list if `self.capacity` is not large enough.
+    pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T {
+        const prev_len = self.items.len;
+        try self.resize(allocator, self.items.len + n);
+        return self.items[prev_len..][0..n];
+    }
+
+    /// Resize the array, adding `n` new elements, which have `undefined` values.
+    /// The return value is a slice pointing to the newly allocated elements.
+    /// Asserts that there is already space for the new items without allocating more.
+    /// **Does not** invalidate element pointers.
+    /// The returned pointer becomes invalid when the list is resized.
+    pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
+        assert(self.items.len + n <= self.capacity);
+        const prev_len = self.items.len;
+        self.items.len += n;
+        return self.items[prev_len..][0..n];
+    }
+
     /// Remove and return the last element from the list.
     /// Asserts the list has at least one item.
     /// Invalidates pointers to last element.
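The new `addManyAsSlice` helpers behave like `resize` followed by taking a slice of
the added region, which is what lets `InternPool.get` bulk-write byte aggregates into
`string_bytes` later in this same patch (e.g.
`@memset(ip.string_bytes.addManyAsSliceAssumeCapacity(aggregate_len), ...)`). A minimal
usage sketch follows; this is hypothetical caller code assuming a Zig 0.11-era
toolchain, not part of the patch itself:

    const std = @import("std");

    // Hypothetical usage example; not part of the patch.
    test "addManyAsSlice returns a view of the new elements" {
        var list = std.ArrayList(u8).init(std.testing.allocator);
        defer list.deinit();

        // Grow by four elements in one step; the slice aliases the undefined tail.
        const tail = try list.addManyAsSlice(4);
        for (tail, "zig!") |*dst, src| dst.* = src;

        // With capacity reserved up front, the AssumeCapacity variant cannot fail
        // and does not invalidate existing element pointers.
        try list.ensureUnusedCapacity(2);
        for (list.addManyAsSliceAssumeCapacity(2)) |*dst| dst.* = '!';

        try std.testing.expectEqualStrings("zig!!!", list.items);
    }
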
diff --git a/src/Air.zig b/src/Air.zig index 070cf7dc724a..9dcbe174ecdc 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -901,8 +901,8 @@ pub const Inst = struct { manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), - const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), - const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), + slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type), + slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), @@ -1382,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .bool_to_int => return Type.u1, - .tag_name, .error_name => return Type.const_slice_u8_sentinel_0, + .tag_name, .error_name => return Type.slice_const_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); diff --git a/src/AstGen.zig b/src/AstGen.zig index 998e08ba04e7..6956a58ae4ed 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -3934,7 +3934,7 @@ fn fnDecl( var section_gz = decl_gz.makeSubBlock(params_scope); defer section_gz.unstack(); const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .const_slice_u8_type } }, fn_proto.ast.section_expr); + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr); if (section_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. 
break :inst inst; @@ -4137,7 +4137,7 @@ fn globalVarDecl( break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node); }; const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: { - break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node); + break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .slice_const_u8_type } }, var_decl.ast.section_node); }; const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none; wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace); @@ -7878,7 +7878,7 @@ fn unionInit( params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const union_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{ .container_type = union_type, .field_name = field_name, @@ -8100,12 +8100,12 @@ fn builtinCall( if (ri.rl == .ref) { return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); } const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); return rvalue(gz, ri, result, node); }, @@ -8271,11 +8271,11 @@ fn builtinCall( .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), .ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ptr_to_int), - .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error), .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), .enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int), .bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int), - .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file), .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name), .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety), .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), @@ -8334,7 +8334,7 @@ fn builtinCall( }, .panic => { try emitDbgNode(gz, node); - return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .panic); + return 
simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic); }, .trap => { try emitDbgNode(gz, node); @@ -8450,7 +8450,7 @@ fn builtinCall( }, .c_define => { if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]); const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), @@ -8546,7 +8546,7 @@ fn builtinCall( }, .field_parent_ptr => { const parent_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ .parent_type = parent_type, .field_name = field_name, @@ -8701,7 +8701,7 @@ fn hasDeclOrField( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const container_type = try typeExpr(gz, scope, lhs_node); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = container_type, .rhs = name, @@ -8851,7 +8851,7 @@ fn simpleCBuiltin( ) InnerError!Zir.Inst.Ref { const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node); _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, @@ -8869,7 +8869,7 @@ fn offsetOf( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const type_inst = try typeExpr(gz, scope, lhs_node); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = type_inst, .rhs = field_name, @@ -10317,8 +10317,8 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type), as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), - as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), - as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type), as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type), as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type), diff --git a/src/Compilation.zig b/src/Compilation.zig index 43b16241fc71..30ac49995597 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -226,7 +226,7 @@ const Job = union(enum) { /// Write the constant value for a Decl to the output file. 
codegen_decl: Module.Decl.Index, /// Write the machine code for a function to the output file. - codegen_func: *Module.Fn, + codegen_func: Module.Fn.Index, /// Render the .h file snippet for the Decl. emit_h_decl: Module.Decl.Index, /// The Decl needs to be analyzed and possibly export itself. @@ -3208,7 +3208,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v // Tests are always emitted in test binaries. The decl_refs are created by // Module.populateTestFunctions, but this will not queue body analysis, so do // that now. - try module.ensureFuncBodyAnalysisQueued(decl.val.castTag(.function).?.data); + const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?; + try module.ensureFuncBodyAnalysisQueued(func_index); } }, .update_embed_file => |embed_file| { diff --git a/src/InternPool.zig b/src/InternPool.zig index d19cc3d647df..ec4d1df45fdf 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -34,6 +34,12 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{}, /// When a Union object is freed from `allocated_unions`, it is pushed into this stack. unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, +/// Fn objects are stored in this data structure because: +/// * They need to be mutated after creation. +allocated_funcs: std.SegmentedList(Module.Fn, 0) = .{}, +/// When a Fn object is freed from `allocated_funcs`, it is pushed into this stack. +funcs_free_list: std.ArrayListUnmanaged(Module.Fn.Index) = .{}, + /// InferredErrorSet objects are stored in this data structure because: /// * They contain pointers such as the errors map and the set of other inferred error sets. /// * They need to be mutated after creation. @@ -66,18 +72,18 @@ const Limb = std.math.big.Limb; const InternPool = @This(); const Module = @import("Module.zig"); +const Sema = @import("Sema.zig"); const KeyAdapter = struct { intern_pool: *const InternPool, pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { _ = b_void; - return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a); + return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a, ctx.intern_pool); } pub fn hash(ctx: @This(), a: Key) u32 { - _ = ctx; - return a.hash32(); + return a.hash32(ctx.intern_pool); } }; @@ -111,10 +117,19 @@ pub const RuntimeIndex = enum(u32) { } }; +/// An index into `string_bytes`. +pub const String = enum(u32) { + _, +}; + /// An index into `string_bytes`. pub const NullTerminatedString = enum(u32) { _, + pub fn toString(self: NullTerminatedString) String { + return @intToEnum(String, @enumToInt(self)); + } + pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { return @intToEnum(OptionalNullTerminatedString, @enumToInt(self)); } @@ -180,23 +195,20 @@ pub const Key = union(enum) { /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented /// via `simple_value` and has a named `Index` tag for it. undef: Index, + runtime_value: TypeValue, simple_value: SimpleValue, - extern_func: struct { - ty: Index, - /// The Decl that corresponds to the function itself. - decl: Module.Decl.Index, - /// Library name if specified. - /// For example `extern "c" fn write(...) usize` would have 'c' as library name. - /// Index into the string table bytes. 
- lib_name: u32, - }, + variable: Key.Variable, + extern_func: ExternFunc, + func: Func, int: Key.Int, + err: Error, + error_union: ErrorUnion, + enum_literal: NullTerminatedString, /// A specific enum tag, indicated by the integer tag value. enum_tag: Key.EnumTag, float: Key.Float, ptr: Ptr, opt: Opt, - /// An instance of a struct, array, or vector. /// Each element/field stored as an `Index`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -261,7 +273,7 @@ pub const Key = union(enum) { pub const ArrayType = struct { len: u64, child: Index, - sentinel: Index, + sentinel: Index = .none, }; pub const VectorType = struct { @@ -369,6 +381,7 @@ pub const Key = union(enum) { return @intCast(u32, x); }, .i64, .big_int => return null, // out of range + .lazy_align, .lazy_size => unreachable, } } }; @@ -441,6 +454,32 @@ pub const Key = union(enum) { } }; + pub const Variable = struct { + ty: Index, + init: Index, + decl: Module.Decl.Index, + lib_name: OptionalNullTerminatedString = .none, + is_extern: bool = false, + is_const: bool = false, + is_threadlocal: bool = false, + is_weak_linkage: bool = false, + }; + + pub const ExternFunc = struct { + ty: Index, + /// The Decl that corresponds to the function itself. + decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" fn write(...) usize` would have 'c' as library name. + /// Index into the string table bytes. + lib_name: OptionalNullTerminatedString, + }; + + pub const Func = struct { + ty: Index, + index: Module.Fn.Index, + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -449,6 +488,8 @@ pub const Key = union(enum) { u64: u64, i64: i64, big_int: BigIntConst, + lazy_align: Index, + lazy_size: Index, /// Big enough to fit any non-BigInt value pub const BigIntSpace = struct { @@ -460,13 +501,26 @@ pub const Key = union(enum) { pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst { return switch (storage) { .big_int => |x| x, - .u64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), - .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + .lazy_align, .lazy_size => unreachable, }; } }; }; + pub const Error = struct { + ty: Index, + name: NullTerminatedString, + }; + + pub const ErrorUnion = struct { + ty: Index, + val: union(enum) { + err_name: NullTerminatedString, + payload: Index, + }, + }; + pub const EnumTag = struct { /// The enum type. 
ty: Index, @@ -497,19 +551,8 @@ pub const Key = union(enum) { len: Index = .none, pub const Addr = union(enum) { - @"var": struct { - init: Index, - owner_decl: Module.Decl.Index, - lib_name: OptionalNullTerminatedString, - is_const: bool, - is_threadlocal: bool, - is_weak_linkage: bool, - }, decl: Module.Decl.Index, - mut_decl: struct { - decl: Module.Decl.Index, - runtime_index: RuntimeIndex, - }, + mut_decl: MutDecl, int: Index, eu_payload: Index, opt_payload: Index, @@ -517,6 +560,10 @@ pub const Key = union(enum) { elem: BaseIndex, field: BaseIndex, + pub const MutDecl = struct { + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, + }; pub const BaseIndex = struct { base: Index, index: u64, @@ -546,22 +593,31 @@ pub const Key = union(enum) { storage: Storage, pub const Storage = union(enum) { + bytes: []const u8, elems: []const Index, repeated_elem: Index, + + pub fn values(self: *const Storage) []const Index { + return switch (self.*) { + .bytes => &.{}, + .elems => |elems| elems, + .repeated_elem => |*elem| @as(*const [1]Index, elem), + }; + } }; }; - pub fn hash32(key: Key) u32 { - return @truncate(u32, key.hash64()); + pub fn hash32(key: Key, ip: *const InternPool) u32 { + return @truncate(u32, key.hash64(ip)); } - pub fn hash64(key: Key) u64 { + pub fn hash64(key: Key, ip: *const InternPool) u64 { var hasher = std.hash.Wyhash.init(0); - key.hashWithHasher(&hasher); + key.hashWithHasher(&hasher, ip); return hasher.final(); } - pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void { + pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void { const KeyTag = @typeInfo(Key).Union.tag_type.?; const key_tag: KeyTag = key; std.hash.autoHash(hasher, key_tag); @@ -575,27 +631,45 @@ pub const Key = union(enum) { .error_union_type, .simple_type, .simple_value, - .extern_func, .opt, .struct_type, .union_type, .un, .undef, + .err, + .error_union, + .enum_literal, .enum_tag, .inferred_error_set_type, => |info| std.hash.autoHash(hasher, info), + .runtime_value => |runtime_value| std.hash.autoHash(hasher, runtime_value.val), .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), .enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl), + .variable => |variable| std.hash.autoHash(hasher, variable.decl), + .extern_func => |extern_func| std.hash.autoHash(hasher, extern_func.decl), + .func => |func| std.hash.autoHash(hasher, func.index), + .int => |int| { // Canonicalize all integers by converting them to BigIntConst. - var buffer: Key.Int.Storage.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - - std.hash.autoHash(hasher, int.ty); - std.hash.autoHash(hasher, big_int.positive); - for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb); + switch (int.storage) { + .u64, .i64, .big_int => { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + + std.hash.autoHash(hasher, int.ty); + std.hash.autoHash(hasher, big_int.positive); + for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb); + }, + .lazy_align, .lazy_size => |lazy_ty| { + std.hash.autoHash( + hasher, + @as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage), + ); + std.hash.autoHash(hasher, lazy_ty); + }, + } }, .float => |float| { @@ -615,7 +689,6 @@ pub const Key = union(enum) { // This is sound due to pointer provenance rules. 
std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr)); switch (ptr.addr) { - .@"var" => |@"var"| std.hash.autoHash(hasher, @"var".owner_decl), .decl => |decl| std.hash.autoHash(hasher, decl), .mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl), .int => |int| std.hash.autoHash(hasher, int), @@ -629,13 +702,47 @@ pub const Key = union(enum) { .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); - std.hash.autoHash(hasher, @as( - @typeInfo(Key.Aggregate.Storage).Union.tag_type.?, - aggregate.storage, - )); + switch (ip.indexToKey(aggregate.ty)) { + .array_type => |array_type| if (array_type.child == .u8_type) switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, + }, + else => {}, + } + switch (aggregate.storage) { - .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem), - .repeated_elem => |elem| std.hash.autoHash(hasher, elem), + .bytes => unreachable, + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, } }, @@ -663,7 +770,7 @@ pub const Key = union(enum) { } } - pub fn eql(a: Key, b: Key) bool { + pub fn eql(a: Key, b: Key, ip: *const InternPool) bool { const KeyTag = @typeInfo(Key).Union.tag_type.?; const a_tag: KeyTag = a; const b_tag: KeyTag = b; @@ -709,9 +816,9 @@ pub const Key = union(enum) { const b_info = b.undef; return a_info == b_info; }, - .extern_func => |a_info| { - const b_info = b.extern_func; - return std.meta.eql(a_info, b_info); + .runtime_value => |a_info| { + const b_info = b.runtime_value; + return a_info.val == b_info.val; }, .opt => |a_info| { const b_info = b.opt; @@ -729,11 +836,36 @@ pub const Key = union(enum) { const b_info = b.un; return std.meta.eql(a_info, b_info); }, + .err => |a_info| { + const b_info = b.err; + return std.meta.eql(a_info, b_info); + }, + .error_union => |a_info| { + const b_info = b.error_union; + return std.meta.eql(a_info, b_info); + }, + .enum_literal => |a_info| { + const b_info = b.enum_literal; + return a_info == b_info; + }, .enum_tag => |a_info| { const b_info = b.enum_tag; return std.meta.eql(a_info, b_info); }, + .variable => |a_info| { + const b_info = b.variable; + return a_info.decl == b_info.decl; + }, + .extern_func => |a_info| { + const b_info = b.extern_func; + return a_info.decl == b_info.decl; + }, + .func => |a_info| { + const b_info = b.func; + return a_info.index == b_info.index; + }, + .ptr => |a_info| { const b_info = b.ptr; if (a_info.ty != b_info.ty or a_info.len != b_info.len) return false; @@ -742,7 +874,6 @@ pub 
const Key = union(enum) { if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; return switch (a_info.addr) { - .@"var" => |a_var| a_var.owner_decl == b_info.addr.@"var".owner_decl, .decl => |a_decl| a_decl == b_info.addr.decl, .mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl), .int => |a_int| a_int == b_info.addr.int, @@ -765,16 +896,27 @@ pub const Key = union(enum) { .u64 => |bb| aa == bb, .i64 => |bb| aa == bb, .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, }, .i64 => |aa| switch (b_info.storage) { .u64 => |bb| aa == bb, .i64 => |bb| aa == bb, .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, }, .big_int => |aa| switch (b_info.storage) { .u64 => |bb| aa.orderAgainstScalar(bb) == .eq, .i64 => |bb| aa.orderAgainstScalar(bb) == .eq, .big_int => |bb| aa.eq(bb), + .lazy_align, .lazy_size => false, + }, + .lazy_align => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_size => false, + .lazy_align => |bb| aa == bb, + }, + .lazy_size => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_align => false, + .lazy_size => |bb| aa == bb, }, }; }, @@ -818,12 +960,43 @@ pub const Key = union(enum) { if (a_info.ty != b_info.ty) return false; const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; - if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) return false; + if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { + for (0..@intCast(usize, ip.aggregateTypeLen(a_info.ty))) |elem_index| { + const a_elem = switch (a_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + const b_elem = switch (b_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + if (a_elem != b_elem) return false; + } + return true; + } - return switch (a_info.storage) { - .elems => |a_elems| std.mem.eql(Index, a_elems, b_info.storage.elems), - .repeated_elem => |a_elem| a_elem == b_info.storage.repeated_elem, - }; + switch (a_info.storage) { + .bytes => |a_bytes| { + const b_bytes = b_info.storage.bytes; + return std.mem.eql(u8, a_bytes, b_bytes); + }, + .elems => |a_elems| { + const b_elems = b_info.storage.elems; + return std.mem.eql(Index, a_elems, b_elems); + }, + .repeated_elem => |a_elem| { + const b_elem = b_info.storage.repeated_elem; + return a_elem == b_elem; + }, + } }, .anon_struct_type => |a_info| { const b_info = b.anon_struct_type; @@ -876,16 +1049,23 @@ pub const Key = union(enum) { .func_type, => .type_type, - inline .ptr, + inline .runtime_value, + .ptr, .int, .float, .opt, + .variable, .extern_func, + .func, + .err, + .error_union, .enum_tag, .aggregate, .un, => |x| x.ty, + .enum_literal => .enum_literal_type, + .undef => |x| x, .simple_value => |s| switch (s) { @@ -977,8 +1157,8 @@ pub const Index = enum(u32) { manyptr_const_u8_type, manyptr_const_u8_sentinel_0_type, single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - const_slice_u8_sentinel_0_type, + slice_const_u8_type, + slice_const_u8_sentinel_0_type, anyerror_void_error_union_type, generic_poison_type, inferred_alloc_const_type, @@ -1128,11 +1308,11 @@ pub const Index = enum(u32) { }, undef: 
DataIsIndex, + runtime_value: DataIsIndex, simple_value: struct { data: SimpleValue }, - ptr_var: struct { data: *PtrVar }, ptr_mut_decl: struct { data: *PtrMutDecl }, ptr_decl: struct { data: *PtrDecl }, - ptr_int: struct { data: *PtrInt }, + ptr_int: struct { data: *PtrAddr }, ptr_eu_payload: DataIsIndex, ptr_opt_payload: DataIsIndex, ptr_comptime_field: struct { data: *PtrComptimeField }, @@ -1151,6 +1331,12 @@ pub const Index = enum(u32) { int_small: struct { data: *IntSmall }, int_positive: struct { data: u32 }, int_negative: struct { data: u32 }, + int_lazy_align: struct { data: *IntLazy }, + int_lazy_size: struct { data: *IntLazy }, + error_set_error: struct { data: *Key.Error }, + error_union_error: struct { data: *Key.Error }, + error_union_payload: struct { data: *TypeValue }, + enum_literal: struct { data: NullTerminatedString }, enum_tag: struct { data: *Key.EnumTag }, float_f16: struct { data: f16 }, float_f32: struct { data: f32 }, @@ -1160,18 +1346,21 @@ pub const Index = enum(u32) { float_c_longdouble_f80: struct { data: *Float80 }, float_c_longdouble_f128: struct { data: *Float128 }, float_comptime_float: struct { data: *Float128 }, + variable: struct { data: *Variable }, extern_func: struct { data: void }, func: struct { data: void }, only_possible_value: DataIsIndex, union_value: struct { data: *Key.Union }, + bytes: struct { data: *Bytes }, aggregate: struct { data: *Aggregate }, repeated: struct { data: *Repeated }, }) void { _ = self; - @setEvalBranchQuota(10_000); - inline for (@typeInfo(Tag).Enum.fields) |tag| { - inline for (@typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields) |entry| { - if (comptime std.mem.eql(u8, tag.name, entry.name)) break; + const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields; + @setEvalBranchQuota(2_000); + inline for (@typeInfo(Tag).Enum.fields, 0..) |tag, start| { + inline for (0..map_fields.len) |offset| { + if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break; } else { @compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry"); } @@ -1318,14 +1507,14 @@ pub const static_keys = [_]Key{ .is_const = true, } }, - // const_slice_u8_type + // slice_const_u8_type .{ .ptr_type = .{ .elem_type = .u8_type, .size = .Slice, .is_const = true, } }, - // const_slice_u8_sentinel_0_type + // slice_const_u8_sentinel_0_type .{ .ptr_type = .{ .elem_type = .u8_type, .sentinel = .zero_u8, @@ -1505,12 +1694,13 @@ pub const Tag = enum(u8) { /// `data` is `Index` of the type. /// Untyped `undefined` is stored instead via `simple_value`. undef, + /// A wrapper for values which are comptime-known but should + /// semantically be runtime-known. + /// `data` is `Index` of the value. + runtime_value, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// A pointer to a var. - /// data is extra index of PtrVal, which contains the type and address. - ptr_var, /// A pointer to a decl that can be mutated at comptime. /// data is extra index of PtrMutDecl, which contains the type and address. ptr_mut_decl, @@ -1518,7 +1708,7 @@ pub const Tag = enum(u8) { /// data is extra index of PtrDecl, which contains the type and address. ptr_decl, /// A pointer with an integer value. - /// data is extra index of PtrInt, which contains the type and address. + /// data is extra index of PtrAddr, which contains the type and address. 
/// Only pointer types are allowed to have this encoding. Optional types must use /// `opt_payload` or `opt_null`. ptr_int, @@ -1585,6 +1775,24 @@ pub const Tag = enum(u8) { /// A negative integer value. /// data is a limbs index to `Int`. int_negative, + /// The ABI alignment of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_align, + /// The ABI size of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_size, + /// An error value. + /// data is extra index of `Key.Error`. + error_set_error, + /// An error union error. + /// data is extra index of `Key.Error`. + error_union_error, + /// An error union payload. + /// data is extra index of `TypeValue`. + error_union_payload, + /// An enum literal value. + /// data is `NullTerminatedString` of the error name. + enum_literal, /// An enum tag value. /// data is extra index of `Key.EnumTag`. enum_tag, @@ -1617,9 +1825,14 @@ pub const Tag = enum(u8) { /// A comptime_float value. /// data is extra index to Float128. float_comptime_float, + /// A global variable. + /// data is extra index to Variable. + variable, /// An extern function. + /// data is extra index to Key.ExternFunc. extern_func, /// A regular function. + /// data is extra index to Key.Func. func, /// This represents the only possible value for *some* types which have /// only one possible value. Not all only-possible-values are encoded this way; @@ -1631,6 +1844,9 @@ pub const Tag = enum(u8) { only_possible_value, /// data is extra index to Key.Union. union_value, + /// An array of bytes. + /// data is extra index to `Bytes`. + bytes, /// An instance of a struct, array, or vector. /// data is extra index to `Aggregate`. aggregate, @@ -1670,6 +1886,13 @@ pub const TypeFunction = struct { }; }; +pub const Bytes = struct { + /// The type of the aggregate + ty: Index, + /// Index into string_bytes, of len ip.aggregateTypeLen(ty) + bytes: String, +}; + /// Trailing: /// 0. element: Index for each len /// len is determined by the aggregate type. @@ -1843,6 +2066,11 @@ pub const Array = struct { } }; +pub const TypeValue = struct { + ty: Index, + val: Index, +}; + /// Trailing: /// 0. field name: NullTerminatedString for each fields_len; declaration order /// 1. tag value: Index for each fields_len; declaration order @@ -1888,21 +2116,22 @@ pub const PackedU64 = packed struct(u64) { } }; -pub const PtrVar = struct { - ty: Index, - /// If flags.is_extern == true this is `none`. +pub const Variable = struct { + /// This is a value if has_init is true, otherwise a type. init: Index, - owner_decl: Module.Decl.Index, + decl: Module.Decl.Index, /// Library name if specified. /// For example `extern "c" var stderrp = ...` would have 'c' as library name. lib_name: OptionalNullTerminatedString, flags: Flags, pub const Flags = packed struct(u32) { + has_init: bool, + is_extern: bool, is_const: bool, is_threadlocal: bool, is_weak_linkage: bool, - _: u29 = 0, + _: u27 = 0, }; }; @@ -1917,7 +2146,7 @@ pub const PtrMutDecl = struct { runtime_index: RuntimeIndex, }; -pub const PtrInt = struct { +pub const PtrAddr = struct { ty: Index, addr: Index, }; @@ -1949,6 +2178,11 @@ pub const IntSmall = struct { value: u32, }; +pub const IntLazy = struct { + ty: Index, + lazy_ty: Index, +}; + /// A f64 value, broken up into 2 u32 parts. 
pub const Float64 = struct { piece0: u32, @@ -2063,6 +2297,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); + ip.funcs_free_list.deinit(gpa); + ip.allocated_funcs.deinit(gpa); + ip.inferred_error_sets_free_list.deinit(gpa); ip.allocated_inferred_error_sets.deinit(gpa); @@ -2235,6 +2472,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_function => .{ .func_type = indexToKeyFuncType(ip, data) }, .undef => .{ .undef = @intToEnum(Index, data) }, + .runtime_value => { + const val = @intToEnum(Index, data); + return .{ .runtime_value = .{ + .ty = ip.typeOf(val), + .val = val, + } }; + }, .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, @@ -2251,18 +2495,11 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .val = payload_val, } }; }, - .ptr_var => { - const info = ip.extraData(PtrVar, data); + .ptr_decl => { + const info = ip.extraData(PtrDecl, data); return .{ .ptr = .{ .ty = info.ty, - .addr = .{ .@"var" = .{ - .init = info.init, - .owner_decl = info.owner_decl, - .lib_name = info.lib_name, - .is_const = info.flags.is_const, - .is_threadlocal = info.flags.is_threadlocal, - .is_weak_linkage = info.flags.is_weak_linkage, - } }, + .addr = .{ .decl = info.decl }, } }; }, .ptr_mut_decl => { @@ -2275,15 +2512,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }, } }; }, - .ptr_decl => { - const info = ip.extraData(PtrDecl, data); - return .{ .ptr = .{ - .ty = info.ty, - .addr = .{ .decl = info.decl }, - } }; - }, .ptr_int => { - const info = ip.extraData(PtrInt, data); + const info = ip.extraData(PtrAddr, data); return .{ .ptr = .{ .ty = info.ty, .addr = .{ .int = info.addr }, @@ -2383,6 +2613,17 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .storage = .{ .u64 = info.value }, } }; }, + .int_lazy_align, .int_lazy_size => |tag| { + const info = ip.extraData(IntLazy, data); + return .{ .int = .{ + .ty = info.ty, + .storage = switch (tag) { + .int_lazy_align => .{ .lazy_align = info.lazy_ty }, + .int_lazy_size => .{ .lazy_size = info.lazy_ty }, + else => unreachable, + }, + } }; + }, .float_f16 => .{ .float = .{ .ty = .f16_type, .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, @@ -2415,8 +2656,21 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .ty = .comptime_float_type, .storage = .{ .f128 = ip.extraData(Float128, data).get() }, } }, - .extern_func => @panic("TODO"), - .func => @panic("TODO"), + .variable => { + const extra = ip.extraData(Variable, data); + return .{ .variable = .{ + .ty = if (extra.flags.has_init) ip.typeOf(extra.init) else extra.init, + .init = if (extra.flags.has_init) extra.init else .none, + .decl = extra.decl, + .lib_name = extra.lib_name, + .is_extern = extra.flags.is_extern, + .is_const = extra.flags.is_const, + .is_threadlocal = extra.flags.is_threadlocal, + .is_weak_linkage = extra.flags.is_weak_linkage, + } }; + }, + .extern_func => .{ .extern_func = ip.extraData(Key.ExternFunc, data) }, + .func => .{ .func = ip.extraData(Key.Func, data) }, .only_possible_value => { const ty = @intToEnum(Index, data); return switch (ip.indexToKey(ty)) { @@ -2438,6 +2692,14 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { else => unreachable, }; }, + .bytes => { + const extra = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLen(extra.ty)); + return .{ .aggregate = .{ + .ty = extra.ty, + .storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] }, + } }; + }, .aggregate => { const 
extra = ip.extraDataTrail(Aggregate, data); const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty)); @@ -2455,6 +2717,22 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }; }, .union_value => .{ .un = ip.extraData(Key.Union, data) }, + .error_set_error => .{ .err = ip.extraData(Key.Error, data) }, + .error_union_error => { + const extra = ip.extraData(Key.Error, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .err_name = extra.name }, + } }; + }, + .error_union_payload => { + const extra = ip.extraData(TypeValue, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .payload = extra.val }, + } }; + }, + .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, }; } @@ -2547,7 +2825,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { _ = ip.map.pop(); var new_key = key; new_key.ptr_type.size = .Many; - const ptr_type_index = try get(ip, gpa, new_key); + const ptr_type_index = try ip.get(gpa, new_key); assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ @@ -2677,6 +2955,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(ty), }); }, + .runtime_value => |runtime_value| { + assert(runtime_value.ty == ip.typeOf(runtime_value.val)); + ip.items.appendAssumeCapacity(.{ + .tag = .runtime_value, + .data = @enumToInt(runtime_value.val), + }); + }, .struct_type => |struct_type| { ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ @@ -2809,7 +3094,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); }, - .extern_func => @panic("TODO"), + .variable => |variable| { + const has_init = variable.init != .none; + if (has_init) assert(variable.ty == ip.typeOf(variable.init)); + ip.items.appendAssumeCapacity(.{ + .tag = .variable, + .data = try ip.addExtra(gpa, Variable{ + .init = if (has_init) variable.init else variable.ty, + .decl = variable.decl, + .lib_name = variable.lib_name, + .flags = .{ + .has_init = has_init, + .is_extern = variable.is_extern, + .is_const = variable.is_const, + .is_threadlocal = variable.is_threadlocal, + .is_weak_linkage = variable.is_weak_linkage, + }, + }), + }); + }, + + .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{ + .tag = .extern_func, + .data = try ip.addExtra(gpa, extern_func), + }), + + .func => |func| ip.items.appendAssumeCapacity(.{ + .tag = .func, + .data = try ip.addExtra(gpa, func), + }), .ptr => |ptr| { const ptr_type = ip.indexToKey(ptr.ty).ptr_type; @@ -2817,20 +3130,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .none => { assert(ptr_type.size != .Slice); switch (ptr.addr) { - .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_var, - .data = try ip.addExtra(gpa, PtrVar{ - .ty = ptr.ty, - .init = @"var".init, - .owner_decl = @"var".owner_decl, - .lib_name = @"var".lib_name, - .flags = .{ - .is_const = @"var".is_const, - .is_threadlocal = @"var".is_threadlocal, - .is_weak_linkage = @"var".is_weak_linkage, - }, - }), - }), .decl => |decl| ip.items.appendAssumeCapacity(.{ .tag = .ptr_decl, .data = try ip.addExtra(gpa, PtrDecl{ @@ -2846,31 +3145,41 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .runtime_index = mut_decl.runtime_index, 
}), }), - .int => |int| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_int, - .data = try ip.addExtra(gpa, PtrInt{ - .ty = ptr.ty, - .addr = int, - }), - }), - .eu_payload, .opt_payload => |data| ip.items.appendAssumeCapacity(.{ - .tag = switch (ptr.addr) { - .eu_payload => .ptr_eu_payload, - .opt_payload => .ptr_opt_payload, - else => unreachable, - }, - .data = @enumToInt(data), - }), - .comptime_field => |field_val| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_comptime_field, - .data = try ip.addExtra(gpa, PtrComptimeField{ - .ty = ptr.ty, - .field_val = field_val, - }), - }), + .int => |int| { + assert(int != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_int, + .data = try ip.addExtra(gpa, PtrAddr{ + .ty = ptr.ty, + .addr = int, + }), + }); + }, + .eu_payload, .opt_payload => |data| { + assert(data != .none); + ip.items.appendAssumeCapacity(.{ + .tag = switch (ptr.addr) { + .eu_payload => .ptr_eu_payload, + .opt_payload => .ptr_opt_payload, + else => unreachable, + }, + .data = @enumToInt(data), + }); + }, + .comptime_field => |field_val| { + assert(field_val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_comptime_field, + .data = try ip.addExtra(gpa, PtrComptimeField{ + .ty = ptr.ty, + .field_val = field_val, + }), + }); + }, .elem, .field => |base_index| { + assert(base_index.base != .none); _ = ip.map.pop(); - const index_index = try get(ip, gpa, .{ .int = .{ + const index_index = try ip.get(gpa, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = base_index.index }, } }); @@ -2894,7 +3203,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { new_key.ptr.ty = ip.slicePtrType(ptr.ty); new_key.ptr.len = .none; assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many); - const ptr_index = try get(ip, gpa, new_key); + const ptr_index = try ip.get(gpa, new_key); assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ @@ -2921,8 +3230,25 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .int => |int| b: { + assert(int.ty == .comptime_int_type or ip.indexToKey(int.ty) == .int_type); + switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| { + ip.items.appendAssumeCapacity(.{ + .tag = switch (int.storage) { + else => unreachable, + .lazy_align => .int_lazy_align, + .lazy_size => .int_lazy_size, + }, + .data = try ip.addExtra(gpa, IntLazy{ + .ty = int.ty, + .lazy_ty = lazy_ty, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + }, + } switch (int.ty) { - .none => unreachable, .u8_type => switch (int.storage) { .big_int => |big_int| { ip.items.appendAssumeCapacity(.{ @@ -2938,6 +3264,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); break :b; }, + .lazy_align, .lazy_size => unreachable, }, .u16_type => switch (int.storage) { .big_int => |big_int| { @@ -2954,6 +3281,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); break :b; }, + .lazy_align, .lazy_size => unreachable, }, .u32_type => switch (int.storage) { .big_int => |big_int| { @@ -2970,6 +3298,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); break :b; }, + .lazy_align, .lazy_size => unreachable, }, .i32_type => switch (int.storage) { .big_int => |big_int| { @@ -2987,6 +3316,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); break :b; }, + .lazy_align, .lazy_size => 
unreachable, }, .usize_type => switch (int.storage) { .big_int => |big_int| { @@ -3007,6 +3337,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { break :b; } }, + .lazy_align, .lazy_size => unreachable, }, .comptime_int_type => switch (int.storage) { .big_int => |big_int| { @@ -3041,6 +3372,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { break :b; } }, + .lazy_align, .lazy_size => unreachable, }, else => {}, } @@ -3077,9 +3409,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const tag: Tag = if (big_int.positive) .int_positive else .int_negative; try addInt(ip, gpa, int.ty, tag, big_int.limbs); }, + .lazy_align, .lazy_size => unreachable, } }, + .err => |err| ip.items.appendAssumeCapacity(.{ + .tag = .error_set_error, + .data = try ip.addExtra(gpa, err), + }), + + .error_union => |error_union| ip.items.appendAssumeCapacity(switch (error_union.val) { + .err_name => |err_name| .{ + .tag = .error_union_error, + .data = try ip.addExtra(gpa, Key.Error{ + .ty = error_union.ty, + .name = err_name, + }), + }, + .payload => |payload| .{ + .tag = .error_union_payload, + .data = try ip.addExtra(gpa, TypeValue{ + .ty = error_union.ty, + .val = payload, + }), + }, + }), + + .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{ + .tag = .enum_literal, + .data = @enumToInt(enum_literal), + }), + .enum_tag => |enum_tag| { assert(enum_tag.ty != .none); assert(enum_tag.int != .none); @@ -3131,9 +3491,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .aggregate => |aggregate| { - assert(aggregate.ty != .none); + const ty_key = ip.indexToKey(aggregate.ty); const aggregate_len = ip.aggregateTypeLen(aggregate.ty); switch (aggregate.storage) { + .bytes => { + assert(ty_key.array_type.child == .u8_type); + }, .elems => |elems| { assert(elems.len == aggregate_len); for (elems) |elem| assert(elem != .none); @@ -3151,9 +3514,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } - switch (ip.indexToKey(aggregate.ty)) { + switch (ty_key) { .anon_struct_type => |anon_struct_type| { if (switch (aggregate.storage) { + .bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| { + if (value != ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = byte }, + } })) break false; + } else true, .elems => |elems| std.mem.eql(Index, anon_struct_type.values, elems), .repeated_elem => |elem| for (anon_struct_type.values) |value| { if (value != elem) break false; @@ -3173,34 +3542,80 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } if (switch (aggregate.storage) { + .bytes => |bytes| for (bytes[1..]) |byte| { + if (byte != bytes[0]) break false; + } else true, .elems => |elems| for (elems[1..]) |elem| { if (elem != elems[0]) break false; } else true, .repeated_elem => true, }) { + const elem = switch (aggregate.storage) { + .bytes => |bytes| elem: { + _ = ip.map.pop(); + const elem = try ip.get(gpa, .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[0] }, + } }); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + break :elem elem; + }, + .elems => |elems| elems[0], + .repeated_elem => |elem| elem, + }; + try ip.extra.ensureUnusedCapacity( gpa, @typeInfo(Repeated).Struct.fields.len, ); - ip.items.appendAssumeCapacity(.{ .tag = .repeated, .data = 
ip.addExtraAssumeCapacity(Repeated{ .ty = aggregate.ty, - .elem_val = switch (aggregate.storage) { - .elems => |elems| elems[0], - .repeated_elem => |elem| elem, - }, + .elem_val = elem, }), }); return @intToEnum(Index, ip.items.len - 1); } + switch (ty_key) { + .array_type => |array_type| if (array_type.child == .u8_type) { + const len_including_sentinel = aggregate_len + @boolToInt(array_type.sentinel != .none); + try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + switch (aggregate.storage) { + .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), + .elems => |elems| for (elems) |elem| ip.string_bytes.appendAssumeCapacity( + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable, + ), + .repeated_elem => |elem| @memset( + ip.string_bytes.addManyAsSliceAssumeCapacity(aggregate_len), + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable, + ), + } + if (array_type.sentinel != .none) ip.string_bytes.appendAssumeCapacity( + ip.indexToKey(array_type.sentinel).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel); + ip.items.appendAssumeCapacity(.{ + .tag = .bytes, + .data = ip.addExtraAssumeCapacity(Bytes{ + .ty = aggregate.ty, + .bytes = bytes.toString(), + }), + }); + return @intToEnum(Index, ip.items.len - 1); + }, + else => {}, + } + try ip.extra.ensureUnusedCapacity( gpa, @typeInfo(Aggregate).Struct.fields.len + aggregate_len, ); - ip.items.appendAssumeCapacity(.{ .tag = .aggregate, .data = ip.addExtraAssumeCapacity(Aggregate{ @@ -3423,12 +3838,16 @@ pub fn finishGetEnum( return @intToEnum(Index, ip.items.len - 1); } -pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { +pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; - const index = ip.map.getIndexAdapted(key, adapter).?; + const index = ip.map.getIndexAdapted(key, adapter) orelse return null; return @intToEnum(Index, index); } +pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { + return ip.getIfExists(key).?; +} + fn addStringsToMap( ip: *InternPool, gpa: Allocator, @@ -3500,9 +3919,11 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { Module.Decl.Index => @enumToInt(@field(extra, field.name)), Module.Namespace.Index => @enumToInt(@field(extra, field.name)), Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)), + Module.Fn.Index => @enumToInt(@field(extra, field.name)), MapIndex => @enumToInt(@field(extra, field.name)), OptionalMapIndex => @enumToInt(@field(extra, field.name)), RuntimeIndex => @enumToInt(@field(extra, field.name)), + String => @enumToInt(@field(extra, field.name)), NullTerminatedString => @enumToInt(@field(extra, field.name)), OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), @@ -3510,7 +3931,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), - PtrVar.Flags => @bitCast(u32, @field(extra, field.name)), + Variable.Flags => @bitCast(u32, @field(extra, field.name)), else => @compileError("bad field type: " ++ 
@typeName(field.type)), }); } @@ -3566,9 +3987,11 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: Module.Decl.Index => @intToEnum(Module.Decl.Index, int32), Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32), Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32), + Module.Fn.Index => @intToEnum(Module.Fn.Index, int32), MapIndex => @intToEnum(MapIndex, int32), OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), RuntimeIndex => @intToEnum(RuntimeIndex, int32), + String => @intToEnum(String, int32), NullTerminatedString => @intToEnum(NullTerminatedString, int32), OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32), i32 => @bitCast(i32, int32), @@ -3576,7 +3999,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), - PtrVar.Flags => @bitCast(PtrVar.Flags, int32), + Variable.Flags => @bitCast(Variable.Flags, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), }; } @@ -3700,8 +4123,8 @@ pub fn childType(ip: InternPool, i: Index) Index { /// Given a slice type, returns the type of the ptr field. pub fn slicePtrType(ip: InternPool, i: Index) Index { switch (i) { - .const_slice_u8_type => return .manyptr_const_u8_type, - .const_slice_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, + .slice_const_u8_type => return .manyptr_const_u8_type, + .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, else => {}, } const item = ip.items.get(@enumToInt(i)); @@ -3830,6 +4253,8 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } }, } }); }, + + .lazy_align, .lazy_size => unreachable, } } @@ -3862,6 +4287,14 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { } } +pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .func) return .none; + const datas = ip.items.items(.data); + return ip.extraData(Key.Func, datas[@enumToInt(val)]).index.toOptional(); +} + pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); @@ -3891,6 +4324,15 @@ pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool { return tags[@enumToInt(ty)] == .type_inferred_error_set; } +/// This is only legal because the initializer is not part of the hash. +pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { + assert(ip.items.items(.tag)[@enumToInt(index)] == .variable); + const field_index = inline for (@typeInfo(Variable).Struct.fields, 0..)
|field, field_index| { + if (comptime std.mem.eql(u8, field.name, "init")) break field_index; + } else unreachable; + ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index); +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } @@ -3903,10 +4345,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); const unions_size = ip.allocated_unions.len * (@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); + const funcs_size = ip.allocated_funcs.len * + (@sizeOf(Module.Fn) + @sizeOf(Module.Decl)); // TODO: map overhead size is not taken into account const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + - structs_size + unions_size; + structs_size + unions_size + funcs_size; std.debug.print( \\InternPool size: {d} bytes @@ -3915,6 +4359,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { \\ {d} limbs: {d} bytes \\ {d} structs: {d} bytes \\ {d} unions: {d} bytes + \\ {d} funcs: {d} bytes \\ , .{ total_size, @@ -3928,6 +4373,8 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { structs_size, ip.allocated_unions.len, unions_size, + ip.allocated_funcs.len, + funcs_size, }); const tags = ip.items.items(.tag); @@ -3982,12 +4429,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { }, .undef => 0, + .runtime_value => 0, .simple_type => 0, .simple_value => 0, - .ptr_var => @sizeOf(PtrVar), .ptr_decl => @sizeOf(PtrDecl), .ptr_mut_decl => @sizeOf(PtrMutDecl), - .ptr_int => @sizeOf(PtrInt), + .ptr_int => @sizeOf(PtrAddr), .ptr_eu_payload => 0, .ptr_opt_payload => 0, .ptr_comptime_field => @sizeOf(PtrComptimeField), @@ -4011,8 +4458,20 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { const int = ip.limbData(Int, data); break :b @sizeOf(Int) + int.limbs_len * 8; }, + + .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), + + .error_set_error, .error_union_error => @sizeOf(Key.Error), + .error_union_payload => @sizeOf(TypeValue), + .enum_literal => 0, .enum_tag => @sizeOf(Key.EnumTag), + .bytes => b: { + const info = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLen(info.ty)); + break :b @sizeOf(Bytes) + len + + @boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0); + }, .aggregate => b: { const info = ip.extraData(Aggregate, data); const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty)); @@ -4028,8 +4487,9 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .float_c_longdouble_f80 => @sizeOf(Float80), .float_c_longdouble_f128 => @sizeOf(Float128), .float_comptime_float => @sizeOf(Float128), - .extern_func => @panic("TODO"), - .func => @panic("TODO"), + .variable => @sizeOf(Variable) + @sizeOf(Module.Decl), + .extern_func => @sizeOf(Key.ExternFunc) + @sizeOf(Module.Decl), + .func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), .only_possible_value => 0, .union_value => @sizeOf(Key.Union), }); @@ -4071,6 +4531,14 @@ pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Un return ip.allocated_unions.at(@enumToInt(index)); } +pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn { + return ip.allocated_funcs.at(@enumToInt(index)); +} + +pub fn funcPtrConst(ip: InternPool, index: Module.Fn.Index) *const Module.Fn { + return ip.allocated_funcs.at(@enumToInt(index)); +} + pub fn inferredErrorSetPtr(ip: 
*InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet { return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } @@ -4117,6 +4585,25 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) }; } +pub fn createFunc( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Fn, +) Allocator.Error!Module.Fn.Index { + if (ip.funcs_free_list.popOrNull()) |index| return index; + const ptr = try ip.allocated_funcs.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Fn.Index, ip.allocated_funcs.len - 1); +} + +pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void { + ip.funcPtr(index).* = undefined; + ip.funcs_free_list.append(gpa, index) catch { + // In order to keep `destroyFunc` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Fn until garbage collection. + }; +} + pub fn createInferredErrorSet( ip: *InternPool, gpa: Allocator, @@ -4142,9 +4629,25 @@ pub fn getOrPutString( s: []const u8, ) Allocator.Error!NullTerminatedString { const string_bytes = &ip.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len); try string_bytes.ensureUnusedCapacity(gpa, s.len + 1); string_bytes.appendSliceAssumeCapacity(s); + string_bytes.appendAssumeCapacity(0); + return ip.getOrPutTrailingString(gpa, s.len + 1); +} + +/// Uses the last len bytes of ip.string_bytes as the key. +pub fn getOrPutTrailingString( + ip: *InternPool, + gpa: Allocator, + len: usize, +) Allocator.Error!NullTerminatedString { + const string_bytes = &ip.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len - len); + if (len > 0 and string_bytes.getLast() == 0) { + _ = string_bytes.pop(); + } else { + try string_bytes.ensureUnusedCapacity(gpa, 1); + } const key: []const u8 = string_bytes.items[str_index..]; const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{ .bytes = string_bytes, @@ -4179,6 +4682,10 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { return string_bytes[start..end :0]; } +pub fn stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { + return ip.stringToSlice(s.unwrap() orelse return null); +} + pub fn typeOf(ip: InternPool, index: Index) Index { return ip.indexToKey(index).typeOf(); } @@ -4199,7 +4706,7 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { }; } -pub fn isNoReturn(ip: InternPool, ty: InternPool.Index) bool { +pub fn isNoReturn(ip: InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, else => switch (ip.indexToKey(ty)) { diff --git a/src/Module.zig b/src/Module.zig index 8174778f48ef..fa24c237b43c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -109,7 +109,7 @@ memoized_calls: MemoizedCallSet = .{}, /// Contains the values from `@setAlignStack`. A sparse table is used here /// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while /// functions are many. -align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{}, +align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{}, /// We optimize memory usage for a compilation with no compile errors by storing the /// error messages and mapping outside of `Decl`.
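
// [A minimal illustrative sketch, not part of the patch: the hunk above
// re-keys `align_stack_fns` from `*const Fn` to `Fn.Index`. Once functions
// live in an InternPool-owned list, a 32-bit index is a stable, trivially
// hashable key, unlike a pointer whose identity dies with its allocation.
// All names below are invented for the demo.]
const std = @import("std");

const FnIndex = enum(u32) { _ };

test "a stable index keys the map instead of a pointer" {
    var align_stack: std.AutoHashMapUnmanaged(FnIndex, u32) = .{};
    defer align_stack.deinit(std.testing.allocator);
    const key = @intToEnum(FnIndex, 3);
    try align_stack.put(std.testing.allocator, key, 16);
    try std.testing.expectEqual(@as(?u32, 16), align_stack.get(key));
}
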
@@ -242,22 +242,23 @@ pub const StringLiteralAdapter = struct { }; const MonomorphedFuncsSet = std.HashMapUnmanaged( - *Fn, + Fn.Index, void, MonomorphedFuncsContext, std.hash_map.default_max_load_percentage, ); const MonomorphedFuncsContext = struct { - pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool { + mod: *Module, + + pub fn eql(ctx: @This(), a: Fn.Index, b: Fn.Index) bool { _ = ctx; return a == b; } /// Must match `Sema.GenericCallAdapter.hash`. - pub fn hash(ctx: @This(), key: *Fn) u64 { - _ = ctx; - return key.hash; + pub fn hash(ctx: @This(), key: Fn.Index) u64 { + return ctx.mod.funcPtr(key).hash; } }; @@ -272,7 +273,7 @@ pub const MemoizedCall = struct { module: *Module, pub const Key = struct { - func: *Fn, + func: Fn.Index, args: []TypedValue, }; @@ -652,21 +653,12 @@ pub const Decl = struct { pub fn clearValues(decl: *Decl, mod: *Module) void { const gpa = mod.gpa; - if (decl.getExternFn()) |extern_fn| { - extern_fn.deinit(gpa); - gpa.destroy(extern_fn); - } - if (decl.getFunction()) |func| { + if (decl.getFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); - if (func.comptime_args != null) { - _ = mod.monomorphed_funcs.remove(func); + if (mod.funcPtr(func).comptime_args != null) { + _ = mod.monomorphed_funcs.removeContext(func, .{ .mod = mod }); } - func.deinit(gpa); - gpa.destroy(func); - } - if (decl.getVariable()) |variable| { - variable.deinit(gpa); - gpa.destroy(variable); + mod.destroyFunc(func); } if (decl.value_arena) |value_arena| { if (decl.owns_tv) { @@ -835,11 +827,11 @@ pub const Decl = struct { /// If the Decl has a value and it is a struct, return it, /// otherwise null. - pub fn getStruct(decl: *Decl, mod: *Module) ?*Struct { - return mod.structPtrUnwrap(getStructIndex(decl, mod)); + pub fn getStruct(decl: Decl, mod: *Module) ?*Struct { + return mod.structPtrUnwrap(decl.getStructIndex(mod)); } - pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex { + pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; if (decl.val.ip_index == .none) return .none; return mod.intern_pool.indexToStructType(decl.val.ip_index); @@ -847,7 +839,7 @@ pub const Decl = struct { /// If the Decl has a value and it is a union, return it, /// otherwise null. - pub fn getUnion(decl: *Decl, mod: *Module) ?*Union { + pub fn getUnion(decl: Decl, mod: *Module) ?*Union { if (!decl.owns_tv) return null; if (decl.val.ip_index == .none) return null; return mod.typeToUnion(decl.val.toType()); @@ -855,32 +847,30 @@ pub const Decl = struct { /// If the Decl has a value and it is a function, return it, /// otherwise null. - pub fn getFunction(decl: *const Decl) ?*Fn { - if (!decl.owns_tv) return null; - const func = (decl.val.castTag(.function) orelse return null).data; - return func; + pub fn getFunction(decl: Decl, mod: *Module) ?*Fn { + return mod.funcPtrUnwrap(decl.getFunctionIndex(mod)); + } + + pub fn getFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { + return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none; } /// If the Decl has a value and it is an extern function, returns it, /// otherwise null. 
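
// [Illustrative sketch, not from the patch: the recurring migration in the
// hunk above. Function and variable payloads move out of `Value` into
// Module-owned storage, so `Decl` getters now take `mod` and resolve through
// it instead of dereferencing an inline payload. A toy analogue, all names
// made up:]
const std = @import("std");

const Pool = struct {
    items: []const u32,

    fn at(pool: Pool, i: u32) u32 {
        return pool.items[i];
    }
};

const Thing = struct {
    item_index: u32,

    // Before the transition this could be `fn get(t: Thing) u32` because the
    // payload was stored inline; now the pool's owner must be passed in.
    fn get(t: Thing, pool: Pool) u32 {
        return pool.at(t.item_index);
    }
};

test "accessor resolves through the owning pool" {
    const pool = Pool{ .items = &.{ 7, 9 } };
    const thing = Thing{ .item_index = 1 };
    try std.testing.expectEqual(@as(u32, 9), thing.get(pool));
}
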
- pub fn getExternFn(decl: *const Decl) ?*ExternFn { - if (!decl.owns_tv) return null; - const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data; - return extern_fn; + pub fn getExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc { + return if (decl.owns_tv) decl.val.getExternFunc(mod) else null; } /// If the Decl has a value and it is a variable, returns it, /// otherwise null. - pub fn getVariable(decl: *const Decl) ?*Var { - if (!decl.owns_tv) return null; - const variable = (decl.val.castTag(.variable) orelse return null).data; - return variable; + pub fn getVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable { + return if (decl.owns_tv) decl.val.getVariable(mod) else null; } /// Gets the namespace that this Decl creates by being a struct, union, /// enum, or opaque. /// Only returns it if the Decl is the owner. - pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex { + pub fn getInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex { if (!decl.owns_tv) return .none; return switch (decl.val.ip_index) { .empty_struct_type => .none, @@ -896,8 +886,8 @@ pub const Decl = struct { } /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. - pub fn getInnerNamespace(decl: *Decl, mod: *Module) ?*Namespace { - return if (getInnerNamespaceIndex(decl, mod).unwrap()) |i| mod.namespacePtr(i) else null; + pub fn getInnerNamespace(decl: Decl, mod: *Module) ?*Namespace { + return if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| mod.namespacePtr(i) else null; } pub fn dump(decl: *Decl) void { @@ -927,14 +917,11 @@ pub const Decl = struct { assert(decl.dependencies.swapRemove(other)); } - pub fn isExtern(decl: Decl) bool { + pub fn isExtern(decl: Decl, mod: *Module) bool { assert(decl.has_tv); - return switch (decl.val.ip_index) { - .none => switch (decl.val.tag()) { - .extern_fn => true, - .variable => decl.val.castTag(.variable).?.data.init.ip_index == .unreachable_value, - else => false, - }, + return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + .variable => |variable| variable.is_extern, + .extern_func => true, else => false, }; } @@ -1494,6 +1481,28 @@ pub const Fn = struct { is_noinline: bool, calls_or_awaits_errorable_fn: bool = false, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + pub const Analysis = enum { /// This function has not yet undergone analysis, because we have not /// seen a potential runtime call. It may be analyzed in future. @@ -1519,7 +1528,7 @@ pub const Fn = struct { /// or comptime functions. pub const InferredErrorSet = struct { /// The function from which this error set originates. - func: *Fn, + func: Fn.Index, /// All currently known errors that this error set contains. 
This includes /// direct additions via `return error.Foo;`, and possibly also errors that @@ -1543,8 +1552,8 @@ pub const Fn = struct { pub const Index = enum(u32) { _, - pub fn toOptional(i: Index) OptionalIndex { - return @intToEnum(OptionalIndex, @enumToInt(i)); + pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(i)); } }; @@ -1552,13 +1561,13 @@ pub const Fn = struct { none = std.math.maxInt(u32), _, - pub fn init(oi: ?Index) OptionalIndex { - return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(oi orelse return .none)); } - pub fn unwrap(oi: OptionalIndex) ?Index { + pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { if (oi == .none) return null; - return @intToEnum(Index, @enumToInt(oi)); + return @intToEnum(InferredErrorSet.Index, @enumToInt(oi)); } }; @@ -1587,12 +1596,6 @@ pub const Fn = struct { } }; - /// TODO: remove this function - pub fn deinit(func: *Fn, gpa: Allocator) void { - _ = func; - _ = gpa; - } - pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { const file = mod.declPtr(func.owner_decl).getFileScope(mod); @@ -1647,28 +1650,6 @@ pub const Fn = struct { } }; -pub const Var = struct { - /// if is_extern == true this is undefined - init: Value, - owner_decl: Decl.Index, - - /// Library name if specified. - /// For example `extern "c" var stderrp = ...` would have 'c' as library name. - /// Allocated with Module's allocator; outlives the ZIR code. - lib_name: ?[*:0]const u8, - - is_extern: bool, - is_mutable: bool, - is_threadlocal: bool, - is_weak_linkage: bool, - - pub fn deinit(variable: *Var, gpa: Allocator) void { - if (variable.lib_name) |lib_name| { - gpa.free(mem.sliceTo(lib_name, 0)); - } - } -}; - pub const DeclAdapter = struct { mod: *Module, @@ -3472,6 +3453,10 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { return mod.intern_pool.structPtr(index); } +pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn { + return mod.intern_pool.funcPtr(index); +} + pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet { return mod.intern_pool.inferredErrorSetPtr(index); } @@ -3479,7 +3464,11 @@ pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.I /// This one accepts an index from the InternPool and asserts that it is not /// the anonymous empty struct type. pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { - return structPtr(mod, index.unwrap() orelse return null); + return mod.structPtr(index.unwrap() orelse return null); +} + +pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn { + return mod.funcPtr(index.unwrap() orelse return null); } /// Returns true if and only if the Decl is the top level struct associated with a File. @@ -3952,7 +3941,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { }; } - if (decl.getFunction()) |func| { + if (decl.getFunction(mod)) |func| { func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { try file.deleted_decls.append(gpa, decl_index); continue; @@ -4139,7 +4128,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.deleteDeclExports(decl_index); // Similarly, `@setAlignStack` invocations will be re-discovered. 
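
// [Illustrative sketch, not from the patch: the `Index`/`OptionalIndex`
// pattern defined in the hunks above packs `?Index` into 32 bits by
// reserving `maxInt(u32)` as the `none` sentinel. A self-contained copy of
// the idiom:]
const std = @import("std");

const Index = enum(u32) {
    _,

    fn toOptional(i: Index) OptionalIndex {
        return @intToEnum(OptionalIndex, @enumToInt(i));
    }
};

const OptionalIndex = enum(u32) {
    none = std.math.maxInt(u32),
    _,

    fn unwrap(oi: OptionalIndex) ?Index {
        if (oi == .none) return null;
        return @intToEnum(Index, @enumToInt(oi));
    }
};

test "round-trip through the optional sentinel" {
    const i = @intToEnum(Index, 42);
    try std.testing.expectEqual(@as(?Index, i), i.toOptional().unwrap());
    try std.testing.expectEqual(@as(?Index, null), OptionalIndex.none.unwrap());
}
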
- if (decl.getFunction()) |func| { + if (decl.getFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); } @@ -4229,10 +4218,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { } } -pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { +pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4264,7 +4254,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { defer tmp_arena.deinit(); const sema_arena = tmp_arena.allocator(); - var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) { + var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { error.AnalysisFail => { if (func.state == .in_progress) { // If this decl caused the compile error, the analysis field would @@ -4333,7 +4323,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_llvm_ir) return; - comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) { + comp.bin_file.updateFunc(mod, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { decl.analysis = .codegen_failure; @@ -4363,7 +4353,8 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { /// analyzed, and for ensuring it can exist at runtime (see /// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`. -pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { +pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4401,7 +4392,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { // Decl itself is safely analyzed, and body analysis is not yet queued - try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); + try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index }); if (mod.emit_h != null) { // TODO: we ideally only want to do this if the function's type changed // since the last update @@ -4532,8 +4523,10 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -4628,8 +4621,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -4707,8 +4702,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (decl_tv.val.castTag(.function)) |fn_payload| { - const func = fn_payload.data; + if (mod.intern_pool.indexToFunc(decl_tv.val.ip_index).unwrap()) |func_index| { + const func = mod.funcPtr(func_index); const owns_tv = func.owner_decl == decl_index; if (owns_tv) { var prev_type_has_bits = false; @@ -4718,7 +4713,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.has_tv) { prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); - if 
(decl.getFunction()) |prev_func| { + if (decl.getFunction(mod)) |prev_func| { prev_is_inline = prev_func.state == .inline_only; } } @@ -4757,38 +4752,25 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { switch (decl_tv.val.ip_index) { .generic_poison => unreachable, .unreachable_value => unreachable, - - .none => switch (decl_tv.val.tag()) { - .variable => { - const variable = decl_tv.val.castTag(.variable).?.data; - if (variable.owner_decl == decl_index) { - decl.owns_tv = true; - queue_linker_work = true; - - const copied_init = try variable.init.copy(decl_arena_allocator); - variable.init = copied_init; - } + else => switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + .variable => |variable| if (variable.decl == decl_index) { + decl.owns_tv = true; + queue_linker_work = true; }, - .extern_fn => { - const extern_fn = decl_tv.val.castTag(.extern_fn).?.data; - if (extern_fn.owner_decl == decl_index) { - decl.owns_tv = true; - queue_linker_work = true; - is_extern = true; - } + + .extern_func => |extern_fn| if (extern_fn.decl == decl_index) { + decl.owns_tv = true; + queue_linker_work = true; + is_extern = true; }, - .function => {}, + .func => {}, else => { log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); queue_linker_work = true; }, }, - else => { - log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); - queue_linker_work = true; - }, } decl.ty = decl_tv.ty; @@ -4810,12 +4792,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.ip_index) { - .none => switch (decl_tv.val.tag()) { - .function, .extern_fn => .function, - .variable => .variable, - else => .constant, - }, + const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + .variable => .variable, + .extern_func, .func => .function, else => .constant, }; @@ -5388,7 +5367,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @intCast(u32, decl_sub_index); - if (decl.getFunction()) |_| { + if (decl.getFunctionIndex(mod) != .none) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state @@ -5572,11 +5551,12 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void export_owners.deinit(mod.gpa); } -pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { +pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -5597,8 +5577,10 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { .owner_decl = decl, .owner_decl_index = decl_index, .func = func, + .func_index = func_index.toOptional(), .fn_ret_ty = fn_ty_info.return_type.toType(), .owner_func = func, + .owner_func_index = func_index.toOptional(), .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), }; defer sema.deinit(); @@ -5807,8 +5789,7 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { for (kv.value) |err| err.deinit(mod.gpa); } if (decl.has_tv 
and decl.owns_tv) { - if (decl.val.castTag(.function)) |payload| { - const func = payload.data; + if (decl.getFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); } } @@ -5852,6 +5833,14 @@ pub fn destroyUnion(mod: *Module, index: Union.Index) void { return mod.intern_pool.destroyUnion(mod.gpa, index); } +pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index { + return mod.intern_pool.createFunc(mod.gpa, initialization); +} + +pub fn destroyFunc(mod: *Module, index: Fn.Index) void { + return mod.intern_pool.destroyFunc(mod.gpa, index); +} + pub fn allocateNewDecl( mod: *Module, namespace: Namespace.Index, @@ -6499,7 +6488,11 @@ pub fn populateTestFunctions( try mod.ensureDeclAnalyzed(decl_index); } const decl = mod.declPtr(decl_index); - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod); + const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod); + const null_usize = try mod.intern(.{ .opt = .{ + .ty = try mod.intern(.{ .opt_type = .usize_type }), + .val = .none, + } }); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions @@ -6512,7 +6505,7 @@ pub fn populateTestFunctions( const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ .ty = try mod.arrayType(.{ .len = test_fn_vals.len, - .child = tmp_test_fn_ty.ip_index, + .child = test_fn_ty.ip_index, .sentinel = .none, }), .val = try Value.Tag.aggregate.create(arena, test_fn_vals), @@ -6530,7 +6523,7 @@ pub fn populateTestFunctions( errdefer name_decl_arena.deinit(); const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try Type.array(name_decl_arena.allocator(), bytes.len, null, Type.u8, mod), + .ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type }), .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), }); try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); @@ -6540,16 +6533,24 @@ pub fn populateTestFunctions( array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal); try mod.linkerUpdateDecl(test_name_decl_index); - const field_vals = try arena.create([3]Value); - field_vals.* = .{ - try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index), - .len = try mod.intValue(Type.usize, test_name_slice.len), - }), // name - try Value.Tag.decl_ref.create(arena, test_decl_index), // func - Value.null, // async_frame_size + const test_fn_fields = .{ + // name + try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = test_name_decl_index }, + } }), + // func + try mod.intern(.{ .ptr = .{ + .ty = test_decl.ty.ip_index, + .addr = .{ .decl = test_decl_index }, + } }), + // async_frame_size + null_usize, }; - test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals); + test_fn_vals[i] = (try mod.intern(.{ .aggregate = .{ + .ty = test_fn_ty.ip_index, + .storage = .{ .elems = &test_fn_fields }, + } })).toValue(); } try array_decl.finalizeNewArena(&new_decl_arena); @@ -6558,36 +6559,25 @@ pub fn populateTestFunctions( try mod.linkerUpdateDecl(array_decl_index); { - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); - - { - // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. 
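
// [Illustrative sketch, not from the patch: `populateTestFunctions` above now
// builds values with `mod.intern(...)` keys instead of arena-allocated
// `Value.Tag.*.create` payloads, so structurally equal values collapse to one
// stable index. A toy pool showing that getOrPut shape; names are invented.]
const std = @import("std");

const Key = union(enum) { int: u64, ptr_decl: u32 };

const ToyPool = struct {
    map: std.AutoArrayHashMapUnmanaged(Key, void) = .{},

    // Structurally equal keys map to the same index.
    fn intern(pool: *ToyPool, gpa: std.mem.Allocator, key: Key) !u32 {
        const gop = try pool.map.getOrPut(gpa, key);
        return @intCast(u32, gop.index);
    }
};

test "equal keys intern to the same index" {
    var pool: ToyPool = .{};
    defer pool.map.deinit(std.testing.allocator);
    const a = try pool.intern(std.testing.allocator, .{ .int = 42 });
    const b = try pool.intern(std.testing.allocator, .{ .int = 42 });
    const c = try pool.intern(std.testing.allocator, .{ .ptr_decl = 0 });
    try std.testing.expectEqual(a, b);
    try std.testing.expect(a != c);
}
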
- const new_ty = try Type.ptr(arena, mod, .{ - .size = .Slice, - .pointee_type = tmp_test_fn_ty, - .mutable = false, - .@"addrspace" = .generic, - }); - const new_var = try gpa.create(Var); - errdefer gpa.destroy(new_var); - new_var.* = decl.val.castTag(.variable).?.data.*; - new_var.init = try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index), - .len = try mod.intValue(Type.usize, mod.test_functions.count()), - }); - const new_val = try Value.Tag.variable.create(arena, new_var); - - // Since we are replacing the Decl's value we must perform cleanup on the - // previous value. - decl.clearValues(mod); - decl.ty = new_ty; - decl.val = new_val; - decl.has_tv = true; - } + const new_ty = try mod.ptrType(.{ + .elem_type = test_fn_ty.ip_index, + .is_const = true, + .size = .Slice, + }); + const new_val = decl.val; + const new_init = try mod.intern(.{ .ptr = .{ + .ty = new_ty.ip_index, + .addr = .{ .decl = array_decl_index }, + .len = (try mod.intValue(Type.usize, mod.test_functions.count())).ip_index, + } }); + mod.intern_pool.mutateVarInit(decl.val.ip_index, new_init); - try decl.finalizeNewArena(&new_decl_arena); + // Since we are replacing the Decl's value we must perform cleanup on the + // previous value. + decl.clearValues(mod); + decl.ty = new_ty; + decl.val = new_val; + decl.has_tv = true; } try mod.linkerUpdateDecl(decl_index); } @@ -6660,50 +6650,47 @@ fn reportRetryableFileError( } pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { - if (val.ip_index != .none) return; - switch (val.tag()) { - .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index), - .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl), - .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl), - .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl), - .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data), - - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data), - - .eu_payload_ptr, - .opt_payload_ptr, - => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr), - - .slice => { - const slice = val.cast(Value.Payload.Slice).?.data; - mod.markReferencedDeclsAlive(slice.ptr); - mod.markReferencedDeclsAlive(slice.len); - }, - - .elem_ptr => { - const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data; - return mod.markReferencedDeclsAlive(elem_ptr.array_ptr); - }, - .field_ptr => { - const field_ptr = val.cast(Value.Payload.FieldPtr).?.data; - return mod.markReferencedDeclsAlive(field_ptr.container_ptr); - }, - .aggregate => { - for (val.castTag(.aggregate).?.data) |field_val| { - mod.markReferencedDeclsAlive(field_val); - } + switch (val.ip_index) { + .none => switch (val.tag()) { + .aggregate => { + for (val.castTag(.aggregate).?.data) |field_val| { + mod.markReferencedDeclsAlive(field_val); + } + }, + .@"union" => { + const data = val.castTag(.@"union").?.data; + mod.markReferencedDeclsAlive(data.tag); + mod.markReferencedDeclsAlive(data.val); + }, + else => {}, }, - .@"union" => { - const data = val.cast(Value.Payload.Union).?.data; - mod.markReferencedDeclsAlive(data.tag); - mod.markReferencedDeclsAlive(data.val); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| mod.markDeclIndexAlive(variable.decl), + .extern_func => |extern_func| 
mod.markDeclIndexAlive(extern_func.decl), + .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .error_union => |error_union| switch (error_union.val) { + .err_name => {}, + .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), + }, + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| mod.markDeclIndexAlive(decl), + .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), + .int, .comptime_field => {}, + .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), + .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), + } + if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); + }, + .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| + mod.markReferencedDeclsAlive(elem.toValue()), + .un => |un| { + mod.markReferencedDeclsAlive(un.tag.toValue()); + mod.markReferencedDeclsAlive(un.val.toValue()); + }, + else => {}, }, - - else => {}, } } @@ -7075,6 +7062,12 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { return @intCast(u16, big.bitCountTwosComp()); }, + .lazy_align => |lazy_ty| { + return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @boolToInt(sign); + }, + .lazy_size => |lazy_ty| { + return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @boolToInt(sign); + }, } } diff --git a/src/Sema.zig b/src/Sema.zig index 7df6e44898a5..d9b346e63829 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -28,10 +28,12 @@ owner_decl_index: Decl.Index, /// For an inline or comptime function call, this will be the root parent function /// which contains the callsite. Corresponds to `owner_decl`. owner_func: ?*Module.Fn, +owner_func_index: Module.Fn.OptionalIndex, /// The function this ZIR code is the body of, according to the source code. /// This starts out the same as `owner_func` and then diverges in the case of /// an inline or comptime function call. func: ?*Module.Fn, +func_index: Module.Fn.OptionalIndex, /// Used to restore the error return trace when returning a non-error from a function. error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none, /// When semantic analysis needs to know the return type of the function whose body @@ -65,7 +67,7 @@ comptime_args_fn_inst: Zir.Inst.Index = 0, /// to use this instead of allocating a fresh one. This avoids an unnecessary /// extra hash table lookup in the `monomorphed_funcs` set. /// Sema will set this to null when it takes ownership. -preallocated_new_func: ?*Module.Fn = null, +preallocated_new_func: Module.Fn.OptionalIndex = .none, /// The key is types that must be fully resolved prior to machine code /// generation pass. 
Types are added to this set when resolving them /// immediately could cause a dependency loop, but they do need to be resolved @@ -92,7 +94,7 @@ unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{} const std = @import("std"); const math = std.math; const mem = std.mem; -const Allocator = std.mem.Allocator; +const Allocator = mem.Allocator; const assert = std.debug.assert; const log = std.log.scoped(.sema); @@ -1777,7 +1779,7 @@ pub fn resolveConstString( reason: []const u8, ) ![]u8 { const air_inst = try sema.resolveInst(zir_ref); - const wanted_type = Type.const_slice_u8; + const wanted_type = Type.slice_const_u8; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, reason); return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); @@ -1866,11 +1868,10 @@ fn resolveConstMaybeUndefVal( if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| { switch (val.ip_index) { .generic_poison => return error.GenericPoison, - .none => switch (val.tag()) { + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, }, - else => return val, } } return sema.failWithNeededComptime(block, src, reason); @@ -1889,11 +1890,11 @@ fn resolveConstValue( switch (val.ip_index) { .generic_poison => return error.GenericPoison, .undef => return sema.failWithUseOfUndef(block, src), - .none => switch (val.tag()) { + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + .undef => return sema.failWithUseOfUndef(block, src), .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, }, - else => return val, } } return sema.failWithNeededComptime(block, src, reason); @@ -1928,11 +1929,11 @@ fn resolveMaybeUndefVal( const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; switch (val.ip_index) { .generic_poison => return error.GenericPoison, - .none => switch (val.tag()) { + .none => return val, + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { .variable => return null, else => return val, }, - else => return val, } } @@ -1948,21 +1949,20 @@ fn resolveMaybeUndefValIntable( var check = val; while (true) switch (check.ip_index) { .generic_poison => return error.GenericPoison, - .none => switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, - else => { - try sema.resolveLazyValue(val); - return val; + .none => break, + else => switch (sema.mod.intern_pool.indexToKey(check.ip_index)) { + .variable => return null, + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => return null, + .int => break, + .eu_payload, .opt_payload => |base| check = base.toValue(), + .elem, .field => |base_index| check = base_index.base.toValue(), }, - }, - else => { - try sema.resolveLazyValue(val); - return val; + else => break, }, }; + try sema.resolveLazyValue(val); + return val; } /// Returns all Value tags including `variable` and `undef`. 
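
// [Illustrative sketch, not from the patch: the rewritten
// `resolveMaybeUndefValIntable` above chases an interned pointer to its
// root: `elem`/`field` and payload pointers recurse on their base,
// `decl`/`mut_decl`/`comptime_field` roots bail out, and an `int` root
// succeeds. A toy version of the same chase, with invented names:]
const std = @import("std");

const Addr = union(enum) {
    int: u64,
    decl: u32,
    elem: *const Addr, // base pointer of a `&base[i]`-style derivation
};

// Returns the integer root, or null if the chain bottoms out in a decl.
fn chaseToInt(start: *const Addr) ?u64 {
    var check = start;
    while (true) switch (check.*) {
        .int => |x| return x,
        .decl => return null,
        .elem => |base| check = base,
    };
}

test "pointer derivations are chased to their root" {
    const root = Addr{ .int = 7 };
    const derived = Addr{ .elem = &root };
    try std.testing.expectEqual(@as(?u64, 7), chaseToInt(&derived));
    const decl_root = Addr{ .decl = 1 };
    const from_decl = Addr{ .elem = &decl_root };
    try std.testing.expectEqual(@as(?u64, null), chaseToInt(&from_decl));
}
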
@@ -1994,7 +1994,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( if (air_tags[i] == .constant) { const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; - if (val.tagIsVariable()) return val; + if (val.getVariable(sema.mod) != null) return val; } return opv; } @@ -2003,7 +2003,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( .constant => { const ty_pl = air_datas[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; - if (val.isRuntimeValue()) make_runtime.* = true; + if (val.isRuntimeValue(sema.mod)) make_runtime.* = true; if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, @@ -2489,13 +2489,13 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE .@"addrspace" = addr_space, }); try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); - return sema.addConstant( - ptr_ty, - try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl_index = iac.data.decl_index, + return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_ty.ip_index, + .addr = .{ .mut_decl = .{ + .decl = iac.data.decl_index, .runtime_index = block.runtime_index, - }), - ); + } }, + } })).toValue()); }, else => {}, } @@ -2949,12 +2949,18 @@ fn zirEnumDecl( } const prev_owner_func = sema.owner_func; + const prev_owner_func_index = sema.owner_func_index; sema.owner_func = null; + sema.owner_func_index = .none; defer sema.owner_func = prev_owner_func; + defer sema.owner_func_index = prev_owner_func_index; const prev_func = sema.func; + const prev_func_index = sema.func_index; sema.func = null; + sema.func_index = .none; defer sema.func = prev_func; + defer sema.func_index = prev_func_index; var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); defer wip_captures.deinit(); @@ -3735,14 +3741,13 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; try sema.maybeQueueFuncBodyAnalysis(decl_index); - if (var_is_mut) { - sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl_index = decl_index, + sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{ + .ty = final_ptr_ty.ip_index, + .addr = if (var_is_mut) .{ .mut_decl = .{ + .decl = decl_index, .runtime_index = block.runtime_index, - }); - } else { - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl_index); - } + } } else .{ .decl = decl_index }, + } })).toValue(); }, .inferred_alloc => { assert(sema.unresolved_inferred_allocs.remove(ptr_inst)); @@ -3836,7 +3841,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // block so that codegen does not see it. block.instructions.shrinkRetainingCapacity(search_index); try sema.maybeQueueFuncBodyAnalysis(new_decl_index); - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index); + sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{ + .ty = final_elem_ty.ip_index, + .addr = .{ .decl = new_decl_index }, + } })).toValue(); // if bitcast ty ref needs to be made const, make_ptr_const // ZIR handles it later, so we can just use the ty ref here. air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty; @@ -4332,12 +4340,16 @@ fn validateUnionInit( // instead a single `store` to the result ptr with a comptime union value. 
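
// [Illustrative sketch, not from the patch: this hunk, and the struct and
// array hunks below, all follow the same shape: build one interned value for
// the whole initializer, then wrap it as a `runtime_value` only when a
// runtime store was observed. The conditional-wrap shape, with invented
// types:]
const std = @import("std");

const Wrapped = union(enum) { plain: u64, runtime: u64 };

fn finish(value: u64, make_runtime: bool) Wrapped {
    // Mirrors `if (make_runtime) union_val = try mod.intern(.{ .runtime_value = ... })`.
    return if (make_runtime) .{ .runtime = value } else .{ .plain = value };
}

test "value is wrapped only when runtime stores were seen" {
    try std.testing.expect(std.meta.eql(Wrapped{ .plain = 5 }, finish(5, false)));
    try std.testing.expect(std.meta.eql(Wrapped{ .runtime = 5 }, finish(5, true)));
}
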
block.instructions.shrinkRetainingCapacity(first_block_index); - var union_val = try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = val, - }); - if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val); - const union_init = try sema.addConstant(union_ty, union_val); + var union_val = try mod.intern(.{ .un = .{ + .ty = union_ty.ip_index, + .tag = tag_val.ip_index, + .val = val.ip_index, + } }); + if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{ + .ty = union_ty.ip_index, + .val = union_val, + } }); + const union_init = try sema.addConstant(union_ty, union_val.toValue()); try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store); return; } else if (try sema.typeRequiresComptime(union_ty)) { @@ -4464,14 +4476,15 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount(mod)); + const field_values = try sema.gpa.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); + defer sema.gpa.free(field_values); field: for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { - field_values[i] = opv; + field_values[i] = opv.ip_index; continue; } @@ -4536,7 +4549,7 @@ fn validateStructInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - field_values[i] = val; + field_values[i] = val.ip_index; } else if (require_comptime) { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known"); @@ -4570,7 +4583,7 @@ fn validateStructInit( } continue; } - field_values[i] = default_val; + field_values[i] = default_val.ip_index; } if (root_msg) |msg| { @@ -4593,9 +4606,15 @@ fn validateStructInit( // instead a single `store` to the struct_ptr with a comptime struct value. 
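
// [Illustrative sketch, not from the patch: scratch buffers like
// `field_values` in the hunk below move off `sema.arena` (whose lifetime
// outlives the call) onto `sema.gpa` with a paired `defer free`, since the
// interned result no longer references the buffer after the call. The idiom:]
const std = @import("std");

test "gpa scratch buffer with paired defer" {
    const gpa = std.testing.allocator;
    const field_values = try gpa.alloc(u32, 4);
    defer gpa.free(field_values); // freed on every exit path, including errors
    @memset(field_values, 0);
    try std.testing.expectEqual(@as(u32, 0), field_values[3]);
}
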
block.instructions.shrinkRetainingCapacity(first_block_index); - var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values); - if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val); - const struct_init = try sema.addConstant(struct_ty, struct_val); + var struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.ip_index, + .storage = .{ .elems = field_values }, + } }); + if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{ + .ty = struct_ty.ip_index, + .val = struct_val, + } }); + const struct_init = try sema.addConstant(struct_ty, struct_val.toValue()); try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; } @@ -4611,7 +4630,7 @@ fn validateStructInit( else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); const field_ty = sema.typeOf(default_field_ptr).childType(mod); - const init = try sema.addConstant(field_ty, field_values[i]); + const init = try sema.addConstant(field_ty, field_values[i].toValue()); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } } @@ -4691,7 +4710,8 @@ fn zirValidateArrayInit( // Collect the comptime element values in case the array literal ends up // being comptime-known. const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); - const element_vals = try sema.arena.alloc(Value, array_len_s); + const element_vals = try sema.gpa.alloc(InternPool.Index, array_len_s); + defer sema.gpa.free(element_vals); const opt_opv = try sema.typeHasOnePossibleValue(array_ty); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); @@ -4701,13 +4721,13 @@ fn zirValidateArrayInit( if (array_ty.isTuple(mod)) { if (try array_ty.structFieldValueComptime(mod, i)) |opv| { - element_vals[i] = opv; + element_vals[i] = opv.ip_index; continue; } } else { // Array has one possible value, so value is always comptime-known if (opt_opv) |opv| { - element_vals[i] = opv; + element_vals[i] = opv.ip_index; continue; } } @@ -4768,7 +4788,7 @@ fn zirValidateArrayInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - element_vals[i] = val; + element_vals[i] = val.ip_index; } else { array_is_comptime = false; } @@ -4780,9 +4800,12 @@ fn zirValidateArrayInit( if (array_is_comptime) { if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| { - if (ptr_val.tag() == .comptime_field_ptr) { - // This store was validated by the individual elem ptrs. - return; + switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .comptime_field => return, // This store was validated by the individual elem ptrs. + else => {}, + }, + else => {}, } } @@ -4790,14 +4813,20 @@ fn zirValidateArrayInit( // instead a single `store` to the array_ptr with a comptime struct value. // Also to populate the sentinel value, if any. 
if (array_ty.sentinel(mod)) |sentinel_val| { - element_vals[instrs.len] = sentinel_val; + element_vals[instrs.len] = sentinel_val.ip_index; } block.instructions.shrinkRetainingCapacity(first_block_index); - var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals); - if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val); - const array_init = try sema.addConstant(array_ty, array_val); + var array_val = try mod.intern(.{ .aggregate = .{ + .ty = array_ty.ip_index, + .storage = .{ .elems = element_vals }, + } }); + if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{ + .ty = array_ty.ip_index, + .val = array_val, + } }); + const array_init = try sema.addConstant(array_ty, array_val.toValue()); try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); } } @@ -5029,7 +5058,7 @@ fn storeToInferredAllocComptime( // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: { - if (operand_val.tagIsVariable()) break :store; + if (operand_val.getVariable(sema.mod) != null) break :store; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl_index = try anon_decl.finish( @@ -5717,8 +5746,8 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { try mod.ensureDeclAnalyzed(decl_index); const exported_decl = mod.declPtr(decl_index); - if (exported_decl.val.castTag(.function)) |some| { - return sema.analyzeExport(block, src, options, some.data.owner_decl); + if (exported_decl.getFunction(mod)) |function| { + return sema.analyzeExport(block, src, options, function.owner_decl); } } try sema.analyzeExport(block, src, options, decl_index); @@ -5741,17 +5770,14 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }, else => |e| return e, }; - const decl_index = switch (operand.val.tag()) { - .function => operand.val.castTag(.function).?.data.owner_decl, - else => blk: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - break :blk try anon_decl.finish( - operand.ty, - try operand.val.copy(anon_decl.arena()), - 0, - ); - }, + const decl_index = if (operand.val.getFunction(sema.mod)) |function| function.owner_decl else blk: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + break :blk try anon_decl.finish( + operand.ty, + try operand.val.copy(anon_decl.arena()), + 0, + ); }; try sema.analyzeExport(block, src, options, decl_index); } @@ -5788,7 +5814,7 @@ pub fn analyzeExport( } // TODO: some backends might support re-exporting extern decls - if (exported_decl.isExtern()) { + if (exported_decl.isExtern(mod)) { return sema.fail(block, src, "export target cannot be extern", .{}); } @@ -5796,7 +5822,7 @@ pub fn analyzeExport( mod.markDeclAlive(exported_decl); try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); - const gpa = mod.gpa; + const gpa = sema.gpa; try mod.decl_exports.ensureUnusedCapacity(gpa, 1); try mod.export_owners.ensureUnusedCapacity(gpa, 1); @@ -5852,8 +5878,9 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst alignment, }); } - const func = sema.func orelse + const func_index = sema.func_index.unwrap() orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); + const func = mod.funcPtr(func_index); const fn_owner_decl = mod.declPtr(func.owner_decl); switch 
(fn_owner_decl.ty.fnCallingConvention(mod)) { @@ -5864,7 +5891,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst }, } - const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func); + const gop = try mod.align_stack_fns.getOrPut(sema.gpa, func_index); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); @@ -6191,10 +6218,13 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; if (func_val.isUndef(mod)) return null; - const owner_decl_index = switch (func_val.tag()) { - .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl, - .function => func_val.castTag(.function).?.data.owner_decl, - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, + const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => return null, + }, else => return null, }; return mod.declPtr(owner_decl_index); @@ -6576,20 +6606,22 @@ const GenericCallAdapter = struct { is_anytype: bool, }; - pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool { + pub fn eql(ctx: @This(), adapted_key: void, other_key: Module.Fn.Index) bool { _ = adapted_key; + const other_func = ctx.module.funcPtr(other_key); + // Checking for equality may happen on an item that has been inserted // into the map but is not yet fully initialized. In such case, the // two initialized fields are `hash` and `generic_owner_decl`. - if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false; + if (ctx.generic_fn.owner_decl != other_func.generic_owner_decl.unwrap().?) return false; - const other_comptime_args = other_key.comptime_args.?; + const other_comptime_args = other_func.comptime_args.?; for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) 
|other_arg, i| { const this_arg = ctx.args[i]; const this_is_comptime = !this_arg.val.isGenericPoison(); const other_is_comptime = !other_arg.val.isGenericPoison(); const this_is_anytype = this_arg.is_anytype; - const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i)); + const other_is_anytype = other_func.isAnytypeParam(ctx.module, @intCast(u32, i)); if (other_is_anytype != this_is_anytype) return false; if (other_is_comptime != this_is_comptime) return false; @@ -6663,7 +6695,7 @@ fn analyzeCall( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6760,18 +6792,21 @@ fn analyzeCall( if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; - const module_fn = switch (func_val.tag()) { - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, - .function => func_val.castTag(.function).?.data, - .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), - else => { - assert(callee_ty.isPtrAtRuntime(mod)); - return sema.fail(block, call_src, "{s} call of function pointer", .{ - @as([]const u8, if (is_comptime_call) "comptime" else "inline"), - }); + .func => |function| function.index, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.declPtr(decl).getFunctionIndex(mod).unwrap().?, + else => { + assert(callee_ty.isPtrAtRuntime(mod)); + return sema.fail(block, call_src, "{s} call of function pointer", .{ + @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + }); + }, }, + else => unreachable, }; if (func_ty_info.is_var_args) { return sema.fail(block, call_src, "{s} call of variadic function", .{ @@ -6804,6 +6839,7 @@ fn analyzeCall( // In order to save a bit of stack space, directly modify Sema rather // than create a child one. 
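
// [Illustrative sketch, not from the patch: the inline-call code just below
// modifies Sema in place, and every `sema.func` swap now carries a parallel
// `sema.func_index` swap, both restored with `defer` so error paths unwind
// correctly. The save/restore idiom in miniature:]
const std = @import("std");

var current: u32 = 0; // stand-in for a field like sema.func_index

fn withTemporary(tmp: u32) u32 {
    const prev = current;
    current = tmp;
    defer current = prev; // restored on all exit paths, including errors
    return current + 1;
}

test "defer restores the previous value" {
    current = 5;
    try std.testing.expectEqual(@as(u32, 8), withTemporary(7));
    try std.testing.expectEqual(@as(u32, 5), current);
}
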
const parent_zir = sema.code; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); sema.code = fn_owner_decl.getFileScope(mod).zir; defer sema.code = parent_zir; @@ -6819,8 +6855,11 @@ fn analyzeCall( } const parent_func = sema.func; + const parent_func_index = sema.func_index; sema.func = module_fn; + sema.func_index = module_fn_index.toOptional(); defer sema.func = parent_func; + defer sema.func_index = parent_func_index; const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry; sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index; @@ -6856,7 +6895,7 @@ fn analyzeCall( defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args); if (is_comptime_call) { memoized_call_key = .{ - .func = module_fn, + .func = module_fn_index, .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len), }; delete_memoized_call_key = true; @@ -6889,7 +6928,7 @@ fn analyzeCall( &child_block, .unneeded, inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, @@ -6907,7 +6946,7 @@ fn analyzeCall( &child_block, mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src), inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, @@ -6950,7 +6989,7 @@ fn analyzeCall( const fn_ret_ty = blk: { if (module_fn.hasInferredErrorSet(mod)) { const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ - .func = module_fn, + .func = module_fn_index, }); const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); @@ -6982,7 +7021,7 @@ fn analyzeCall( const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { - try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); + try sema.emitDbgInline(block, parent_func_index.unwrap().?, module_fn_index, new_func_resolved_ty, .dbg_inline_begin); const zir_tags = sema.code.instructions.items(.tag); for (fn_info.param_body) |param| switch (zir_tags[param]) { @@ -7014,7 +7053,7 @@ fn analyzeCall( error.ComptimeReturn => break :result inlining.comptime_result, error.AnalysisFail => { const err_msg = sema.err orelse return err; - if (std.mem.eql(u8, err_msg.msg, recursive_msg)) return err; + if (mem.eql(u8, err_msg.msg, recursive_msg)) return err; try sema.errNote(block, call_src, err_msg, "called from here", .{}); err_msg.clearTrace(sema.gpa); return err; @@ -7027,8 +7066,8 @@ fn analyzeCall( if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) { try sema.emitDbgInline( block, - module_fn, - parent_func.?, + module_fn_index, + parent_func_index.unwrap().?, mod.declPtr(parent_func.?.owner_decl).ty, .dbg_inline_end, ); @@ -7120,8 +7159,8 @@ fn analyzeCall( } if (try sema.resolveMaybeUndefVal(func)) |func_val| { - if (func_val.castTag(.function)) |func_obj| { - try sema.mod.ensureFuncBodyAnalysisQueued(func_obj.data); + if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| { + try sema.mod.ensureFuncBodyAnalysisQueued(func_index); } } @@ -7147,9 +7186,9 @@ fn analyzeCall( // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
check: { - var func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; - switch (func_val.tag()) { - .function, .decl_ref => { + const func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; + switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .func, .extern_func, .ptr => { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; }, @@ -7196,7 +7235,7 @@ fn analyzeInlineCallArg( param_block: *Block, arg_src: LazySrcLoc, inst: Zir.Inst.Index, - new_fn_info: InternPool.Key.FuncType, + new_fn_info: *InternPool.Key.FuncType, arg_i: *usize, uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, @@ -7263,7 +7302,7 @@ fn analyzeInlineCallArg( try sema.resolveLazyValue(arg_val); }, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); + should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod); memoized_call_key.args[arg_i.*] = .{ .ty = param_ty.toType(), .val = arg_val, @@ -7302,7 +7341,7 @@ fn analyzeInlineCallArg( try sema.resolveLazyValue(arg_val); }, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); + should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod); memoized_call_key.args[arg_i.*] = .{ .ty = sema.typeOf(uncasted_arg), .val = arg_val, @@ -7387,11 +7426,11 @@ fn instantiateGenericCall( const gpa = sema.gpa; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = switch (func_val.tag()) { - .function => func_val.castTag(.function).?.data, - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, + const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .func => |function| function.index, + .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?, else => unreachable, - }; + }); // Check the Module's generic function map with an adapted context, so that we // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. @@ -7496,16 +7535,17 @@ fn instantiateGenericCall( .args = generic_args, .module = mod, }; - const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter); - const callee = if (!gop.found_existing) callee: { - const new_module_func = try gpa.create(Module.Fn); + const gop = try mod.monomorphed_funcs.getOrPutContextAdapted(gpa, {}, adapter, .{ .mod = mod }); + const callee_index = if (!gop.found_existing) callee: { + const new_module_func_index = try mod.createFunc(undefined); + const new_module_func = mod.funcPtr(new_module_func_index); // This ensures that we can operate on the hash map before the Module.Fn // struct is fully initialized. 
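// Aside: "operate on the hash map before the Module.Fn struct is fully
// initialized" works because the entry is keyed by a stable index plus a
// precomputed hash: the slot is created `undefined`, only the fields the map
// depends on are set, and the rest is filled in after instantiation succeeds
// (with removal on error). A self-contained sketch of that two-phase
// initialization; the real code probes with an adapted context over the call
// arguments, which this sketch simplifies to a plain hash key.
const std = @import("std");

const Fn = struct { hash: u64, branch_quota: u32 };

test "two-phase init keyed by a precomputed hash" {
    const gpa = std.testing.allocator;
    var funcs = std.ArrayList(Fn).init(gpa);
    defer funcs.deinit();
    var monomorphed = std.AutoHashMap(u64, u32).init(gpa); // hash -> Fn index
    defer monomorphed.deinit();

    const precomputed_hash: u64 = 0xdead_beef;
    const gop = try monomorphed.getOrPut(precomputed_hash);
    if (!gop.found_existing) {
        // Phase 1: create the slot and set only what the map needs.
        const index = @intCast(u32, funcs.items.len);
        try funcs.append(undefined);
        funcs.items[index].hash = precomputed_hash;
        gop.value_ptr.* = index;
        // Phase 2: finish initialization once instantiation succeeds.
        funcs.items[index].branch_quota = 1000;
    }
    try std.testing.expect(monomorphed.contains(precomputed_hash));
}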
new_module_func.hash = precomputed_hash; new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional(); new_module_func.comptime_args = null; - gop.key_ptr.* = new_module_func; + gop.key_ptr.* = new_module_func_index; try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); @@ -7549,7 +7589,7 @@ fn instantiateGenericCall( new_decl_index, uncasted_args, module_fn, - new_module_func, + new_module_func_index, namespace_index, func_ty_info, call_src, @@ -7565,12 +7605,12 @@ fn instantiateGenericCall( } assert(namespace.anon_decls.orderedRemove(new_decl_index)); mod.destroyDecl(new_decl_index); - assert(mod.monomorphed_funcs.remove(new_module_func)); - gpa.destroy(new_module_func); + assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); + mod.destroyFunc(new_module_func_index); return err; }, else => { - assert(mod.monomorphed_funcs.remove(new_module_func)); + assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); { errdefer new_decl_arena.deinit(); try new_decl.finalizeNewArena(&new_decl_arena); @@ -7590,6 +7630,7 @@ fn instantiateGenericCall( try new_decl.finalizeNewArena(&new_decl_arena); break :callee new_func; } else gop.key_ptr.*; + const callee = mod.funcPtr(callee_index); callee.branch_quota = @max(callee.branch_quota, sema.branch_quota); @@ -7645,7 +7686,7 @@ fn instantiateGenericCall( sema.owner_func.?.calls_or_awaits_errorable_fn = true; } - try sema.mod.ensureFuncBodyAnalysisQueued(callee); + try sema.mod.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); @@ -7682,12 +7723,12 @@ fn resolveGenericInstantiationType( new_decl_index: Decl.Index, uncasted_args: []const Air.Inst.Ref, module_fn: *Module.Fn, - new_module_func: *Module.Fn, + new_module_func: Module.Fn.Index, namespace: Namespace.Index, func_ty_info: InternPool.Key.FuncType, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, -) !*Module.Fn { +) !Module.Fn.Index { const mod = sema.mod; const gpa = sema.gpa; @@ -7707,11 +7748,13 @@ fn resolveGenericInstantiationType( .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), .comptime_args_fn_inst = module_fn.zir_body_inst, - .preallocated_new_func = new_module_func, + .preallocated_new_func = new_module_func.toOptional(), .is_generic_instantiation = true, .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, @@ -7802,8 +7845,8 @@ fn resolveGenericInstantiationType( const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst); const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable; - const new_func = new_func_val.castTag(.function).?.data; - errdefer new_func.deinit(gpa); + const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; + errdefer mod.destroyFunc(new_func); assert(new_func == new_module_func); arg_i = 0; @@ -7867,7 +7910,10 @@ fn resolveGenericInstantiationType( return error.GenericPoison; } - new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func); + new_decl.val = (try mod.intern(.{ .func = .{ + .ty = new_decl.ty.ip_index, + .index = new_func, + } })).toValue(); new_decl.@"align" = 0; new_decl.has_tv = true; new_decl.owns_tv = true; @@ -7900,8 +7946,8 
@@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) fn emitDbgInline( sema: *Sema, block: *Block, - old_func: *Module.Fn, - new_func: *Module.Fn, + old_func: Module.Fn.Index, + new_func: Module.Fn.Index, new_func_ty: Type, tag: Air.Inst.Tag, ) CompileError!void { @@ -7910,7 +7956,10 @@ fn emitDbgInline( // Recursive inline call; no dbg_inline needed. if (old_func == new_func) return; - try sema.air_values.append(sema.gpa, try Value.Tag.function.create(sema.arena, new_func)); + try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ + .ty = new_func_ty.ip_index, + .index = new_func, + } })).toValue()); _ = try block.addInst(.{ .tag = tag, .data = .{ .ty_pl = .{ @@ -8078,12 +8127,11 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const name = inst_data.get(sema.code); // Create an error set type with only this error value, and return the value. const kv = try sema.mod.getErrorValue(name); - return sema.addConstant( - try mod.singleErrorSetType(kv.key), - try Value.Tag.@"error".create(sema.arena, .{ - .name = kv.key, - }), - ); + const error_set_type = try mod.singleErrorSetType(kv.key); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + } })).toValue()); } fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -8101,23 +8149,11 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - switch (val.tag()) { - .@"error" => { - return sema.addConstant( - Type.err_int, - try mod.intValue( - Type.err_int, - (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - ), - ); - }, - - // This is not a valid combination with the type `anyerror`. - .the_only_possible_value => unreachable, - - // Assume it's already encoded as an integer. 
- else => return sema.addConstant(Type.err_int, val), - } + const err_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + return sema.addConstant(Type.err_int, try mod.intValue( + Type.err_int, + (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value, + )); } const op_ty = sema.typeOf(uncasted_operand); @@ -8142,23 +8178,21 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src); - const mod = sema.mod; if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); if (int > sema.mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); - const payload = try sema.arena.create(Value.Payload.Error); - payload.* = .{ - .base = .{ .tag = .@"error" }, - .data = .{ .name = sema.mod.error_name_list.items[int] }, - }; - return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base)); + return sema.addConstant(Type.anyerror, (try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = mod.intern_pool.getString(sema.mod.error_name_list.items[int]).unwrap().?, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { @@ -8234,12 +8268,12 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); - return sema.addConstant( - .{ .ip_index = .enum_literal_type }, - try Value.Tag.enum_literal.create(sema.arena, duped_name), - ); + const name = inst_data.get(sema.code); + return sema.addConstant(.{ .ip_index = .enum_literal_type }, (try mod.intern(.{ + .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name), + })).toValue()); } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -8404,32 +8438,26 @@ fn analyzeOptionalPayloadPtr( if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the optional non-null bit would be set that way. But in this case, // we need to emit a runtime instruction to do it. 
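// Aside: error names and enum literals above now go through the InternPool
// string table: `getOrPutString` interns a name (allocating on first sight),
// while `getString` is a pure lookup whose result is unwrapped with `.?` when
// the name is known to have been interned earlier. A minimal sketch of such a
// pool; hypothetical and far simpler than the real one (in particular, it
// does not copy the bytes, so it relies on static string lifetimes).
const std = @import("std");

const StringPool = struct {
    map: std.StringHashMap(u32),
    next_index: u32 = 0,

    fn getOrPutString(p: *StringPool, s: []const u8) !u32 {
        const gop = try p.map.getOrPut(s);
        if (!gop.found_existing) {
            gop.value_ptr.* = p.next_index;
            p.next_index += 1;
        }
        return gop.value_ptr.*;
    }

    // Pure lookup; null if the name was never interned.
    fn getString(p: *const StringPool, s: []const u8) ?u32 {
        return p.map.get(s);
    }
};

test "interned names compare as integers" {
    var pool = StringPool{ .map = std.StringHashMap(u32).init(std.testing.allocator) };
    defer pool.map.deinit();
    const a = try pool.getOrPutString("OutOfMemory");
    const b = try pool.getOrPutString("OutOfMemory");
    try std.testing.expectEqual(a, b);
    try std.testing.expect(pool.getString("NeverSeen") == null);
}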
_ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); } - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(mod), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.ip_index, + .addr = .{ .opt_payload = ptr_val.ip_index }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(mod), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.ip_index, + .addr = .{ .opt_payload = ptr_val.ip_index }, + } })).toValue()); } } @@ -8532,11 +8560,13 @@ fn analyzeErrUnionPayload( const mod = sema.mod; const payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - if (val.getError()) |name| { + if (val.getError(mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } - const data = val.castTag(.eu_payload).?.data; - return sema.addConstant(payload_ty, data); + return sema.addConstant( + payload_ty, + mod.intern_pool.indexToKey(val.ip_index).error_union.val.payload.toValue(), + ); } try sema.requireRuntimeBlock(block, src, null); @@ -8595,33 +8625,26 @@ fn analyzeErrUnionPayloadPtr( if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the error union error code would be set that way. But in this case, // we need to emit a runtime instruction to do it. 
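// Aside: the `.addr = .{ .opt_payload = ... }` / `.eu_payload` / `.field`
// forms above replace the old Value.Tag.*_ptr payloads: a comptime-known
// pointer becomes an interned (type, address-strategy) pair, where the
// strategy records what the pointer points into. A toy model of that
// representation; the names are hypothetical, not the real InternPool layout.
const std = @import("std");

const Index = u32; // stand-in for an InternPool index

// How a comptime-known pointer addresses its pointee.
const Addr = union(enum) {
    decl: Index, // address of a declaration
    opt_payload: Index, // payload of the optional behind this base pointer
    eu_payload: Index, // payload of the error union behind this base pointer
    field: struct { base: Index, index: u32 }, // one field of an aggregate
};

const Ptr = struct { ty: Index, addr: Addr };

fn describe(ptr: Ptr) []const u8 {
    return switch (ptr.addr) {
        .decl => "a declaration",
        .opt_payload => "the payload of an optional",
        .eu_payload => "the payload of an error union",
        .field => "a field of an aggregate",
    };
}

test "address strategies" {
    const p = Ptr{ .ty = 0, .addr = .{ .field = .{ .base = 5, .index = 2 } } };
    try std.testing.expectEqualStrings("a field of an aggregate", describe(p));
}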
try sema.requireRuntimeBlock(block, src, null); _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); } - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.childType(mod), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.ip_index, + .addr = .{ .eu_payload = ptr_val.ip_index }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { - if (val.getError()) |name| { + if (val.getError(mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } - - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.childType(mod), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.ip_index, + .addr = .{ .eu_payload = ptr_val.ip_index }, + } })).toValue()); } } @@ -8664,7 +8687,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - assert(val.getError() != null); + assert(val.getError(mod) != null); return sema.addConstant(result_ty, val); } @@ -8694,7 +8717,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { - assert(val.getError() != null); + assert(val.getError(mod) != null); return sema.addConstant(result_ty, val); } } @@ -8931,20 +8954,21 @@ fn funcCommon( } var destroy_fn_on_error = false; - const new_func: *Module.Fn = new_func: { + const new_func_index = new_func: { if (!has_body) break :new_func undefined; if (sema.comptime_args_fn_inst == func_inst) { - const new_func = sema.preallocated_new_func.?; - sema.preallocated_new_func = null; // take ownership - break :new_func new_func; + const new_func_index = sema.preallocated_new_func.unwrap().?; + sema.preallocated_new_func = .none; // take ownership + break :new_func new_func_index; } destroy_fn_on_error = true; - const new_func = try gpa.create(Module.Fn); + var new_func: Module.Fn = undefined; // Set this here so that the inferred return type can be printed correctly if it appears in an error. 
new_func.owner_decl = sema.owner_decl_index; - break :new_func new_func; + const new_func_index = try mod.createFunc(new_func); + break :new_func new_func_index; }; - errdefer if (destroy_fn_on_error) gpa.destroy(new_func); + errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index); const target = sema.mod.getTarget(); const fn_ty: Type = fn_ty: { @@ -9008,7 +9032,7 @@ fn funcCommon( else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ - .func = new_func, + .func = new_func_index, }); const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); @@ -9158,26 +9182,16 @@ fn funcCommon( sema.owner_decl.@"addrspace" = address_space orelse .generic; if (is_extern) { - const new_extern_fn = try gpa.create(Module.ExternFn); - errdefer gpa.destroy(new_extern_fn); - - new_extern_fn.* = Module.ExternFn{ - .owner_decl = sema.owner_decl_index, - .lib_name = null, - }; - - if (opt_lib_name) |lib_name| { - new_extern_fn.lib_name = try sema.handleExternLibName(block, .{ - .node_offset_lib_name = src_node_offset, - }, lib_name); - } - - const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn); - extern_fn_payload.* = .{ - .base = .{ .tag = .extern_fn }, - .data = new_extern_fn, - }; - return sema.addConstant(fn_ty, Value.initPayload(&extern_fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{ + .ty = fn_ty.ip_index, + .decl = sema.owner_decl_index, + .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString( + gpa, + try sema.handleExternLibName(block, .{ + .node_offset_lib_name = src_node_offset, + }, lib_name), + )).toOptional() else .none, + } })).toValue()); } if (!has_body) { @@ -9191,9 +9205,9 @@ fn funcCommon( break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; } else null; + const new_func = mod.funcPtr(new_func_index); const hash = new_func.hash; const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl; - const fn_payload = try sema.arena.create(Value.Payload.Function); new_func.* = .{ .state = anal_state, .zir_body_inst = func_inst, @@ -9208,11 +9222,10 @@ fn funcCommon( .branch_quota = default_branch_quota, .is_noinline = is_noinline, }; - fn_payload.* = .{ - .base = .{ .tag = .function }, - .data = new_func, - }; - return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{ + .ty = fn_ty.ip_index, + .index = new_func_index, + } })).toValue()); } fn analyzeParameter( @@ -9312,7 +9325,7 @@ fn zirParam( const prev_preallocated_new_func = sema.preallocated_new_func; const prev_no_partial_func_type = sema.no_partial_func_ty; block.params = .{}; - sema.preallocated_new_func = null; + sema.preallocated_new_func = .none; sema.no_partial_func_ty = true; defer { block.params.deinit(sema.gpa); @@ -9369,7 +9382,7 @@ fn zirParam( else => |e| return e, } or comptime_syntax; if (sema.inst_map.get(inst)) |arg| { - if (is_comptime and sema.preallocated_new_func != null) { + if (is_comptime and sema.preallocated_new_func != .none) { // We have a comptime value for this parameter so it should be elided from the // function type of the function instruction in this block. 
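// Aside: `mod.createFunc` / `mod.funcPtr` / `mod.destroyFunc` above replace
// raw `gpa.create(Module.Fn)` pointers with module-owned indices. A
// standalone sketch of the handle pattern under those assumptions
// (hypothetical types; a real destroyFunc would recycle slots via a free
// list):
const std = @import("std");

const Fn = struct { branch_quota: u32 };
const FnIndex = enum(u32) { _ }; // non-exhaustive enum as a typed handle

const Module = struct {
    funcs: std.ArrayList(Fn),

    fn createFunc(m: *Module, f: Fn) !FnIndex {
        const index = @intToEnum(FnIndex, @intCast(u32, m.funcs.items.len));
        try m.funcs.append(f);
        return index;
    }

    // Pointers are re-derived on demand, so growth of `funcs` cannot
    // invalidate anything callers have stored.
    fn funcPtr(m: *Module, index: FnIndex) *Fn {
        return &m.funcs.items[@enumToInt(index)];
    }
};

test "indices stay valid across container growth" {
    var mod = Module{ .funcs = std.ArrayList(Fn).init(std.testing.allocator) };
    defer mod.funcs.deinit();
    const a = try mod.createFunc(.{ .branch_quota = 1000 });
    _ = try mod.createFunc(.{ .branch_quota = 2000 }); // may reallocate
    mod.funcPtr(a).branch_quota = 3000;
    try std.testing.expectEqual(@as(u32, 3000), mod.funcPtr(a).branch_quota);
}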
const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) { @@ -9392,7 +9405,7 @@ fn zirParam( assert(sema.inst_map.remove(inst)); } - if (sema.preallocated_new_func != null) { + if (sema.preallocated_new_func != .none) { if (try sema.typeHasOnePossibleValue(param_ty)) |opv| { // In this case we are instantiating a generic function call with a non-comptime // non-anytype parameter that ended up being a one-possible-type. @@ -9640,8 +9653,8 @@ fn intCast( if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); - const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); + const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0)); + const zero_inst = try sema.addConstant(operand_ty, zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ .tag = .reduce, @@ -9649,7 +9662,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0)); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9673,10 +9686,7 @@ fn intCast( // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty); - const dest_max_val = if (is_vector) - try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) - else - dest_max_val_scalar; + const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = try sema.addConstant(operand_ty, dest_max_val); const diff = try block.addBinOp(.subwrap, dest_max, operand); @@ -9732,7 +9742,8 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); + const scalar_zero = try mod.intValue(operand_scalar_ty, 0); + const zero_val = try sema.splat(operand_ty, scalar_zero); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -10139,17 +10150,18 @@ fn zirSwitchCapture( .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = union_val, - .container_ty = operand_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = union_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } - const tag_and_val = union_val.castTag(.@"union").?.data; - return sema.addConstant(field_ty, tag_and_val.val); + return sema.addConstant( + field_ty, + mod.intern_pool.indexToKey(union_val.ip_index).un.val.toValue(), + ); } if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ @@ -10243,14 +10255,13 @@ fn zirSwitchCapture( }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { - return sema.addConstant( - field_ty_ptr, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = op_ptr_val, - .container_ty = operand_ty, 
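// Aside: the `sema.preallocated_new_func != .none` checks above work because
// the optional form of an index is itself an enum with a reserved `none`
// tag, rather than a `?*Fn`: it stays 32 bits and is trivially comparable.
// A sketch of that shape; it mirrors the pattern, not the exact compiler
// definitions.
const std = @import("std");

const FnIndex = enum(u32) {
    _,

    fn toOptional(i: FnIndex) OptionalFnIndex {
        return @intToEnum(OptionalFnIndex, @enumToInt(i));
    }
};

const OptionalFnIndex = enum(u32) {
    none = std.math.maxInt(u32),
    _,

    fn unwrap(i: OptionalFnIndex) ?FnIndex {
        if (i == .none) return null;
        return @intToEnum(FnIndex, @enumToInt(i));
    }
};

test "optional index round-trips" {
    const i = @intToEnum(FnIndex, 3);
    const opt = i.toOptional();
    try std.testing.expect(opt != .none);
    try std.testing.expectEqual(@as(u32, 3), @enumToInt(opt.unwrap().?));
}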
- .field_index = first_field_index, - }), - ); + return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{ + .ty = field_ty_ptr.ip_index, + .addr = .{ .field = .{ + .base = op_ptr_val.ip_index, + .index = first_field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); return block.addStructFieldPtr(operand_ptr, first_field_index, field_ty_ptr); @@ -10273,7 +10284,7 @@ fn zirSwitchCapture( const item_ref = try sema.resolveInst(item); // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError().?); + const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError(mod).?); names.putAssumeCapacityNoClobber(name_ip, {}); } const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); @@ -10284,7 +10295,7 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const item_ty = try mod.singleErrorSetType(item_val.getError().?); + const item_ty = try mod.singleErrorSetType(item_val.getError(mod).?); return sema.bitCast(block, item_ty, operand, operand_src, null); } }, @@ -10809,10 +10820,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError check_range: { if (operand_ty.zigTypeTag(mod) == .Int) { - var arena = std.heap.ArenaAllocator.init(gpa); - defer arena.deinit(); - - const min_int = try operand_ty.minInt(arena.allocator(), mod); + const min_int = try operand_ty.minInt(mod); const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { @@ -11493,8 +11501,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (seen_errors.contains(error_name)) continue; cases_len += 1; - const item_val = try Value.Tag.@"error".create(sema.arena, .{ .name = error_name }); - const item_ref = try sema.addConstant(operand_ty, item_val); + const item_val = try mod.intern(.{ .err = .{ + .ty = operand_ty.ip_index, + .name = error_name_ip, + } }); + const item_ref = try sema.addConstant(operand_ty, item_val.toValue()); case_block.inline_case_capture = item_ref; case_block.instructions.shrinkRetainingCapacity(0); @@ -11665,7 +11676,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; - const min = try ty.minInt(sema.arena, mod); + const min = try ty.minInt(mod); const max = try ty.maxIntScalar(mod, Type.comptime_int); return RangeSetUnhandledIterator{ @@ -11788,9 +11799,10 @@ fn validateSwitchItemError( src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { + const ip = &sema.mod.intern_pool; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); // TODO: Do I need to typecheck here?
- const error_name = item_tv.val.castTag(.@"error").?.data.name; + const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.ip_index).err.name); const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev| prev.value else @@ -11983,7 +11995,7 @@ fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Ind } if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| { if (!operand_ty.isError(mod)) return; - if (val.getError() == null) return; + if (val.getError(mod) == null) return; try sema.maybeErrorUnwrapComptime(block, body, err_operand); } } @@ -12005,7 +12017,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I const src = inst_data.src(); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.getError()) |name| { + if (val.getError(sema.mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } } @@ -12172,11 +12184,11 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R // Return the error code from the function. const kv = try mod.getErrorValue(err_name); - const result_inst = try sema.addConstant( - try mod.singleErrorSetType(kv.key), - try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - ); - return result_inst; + const error_set_type = try mod.singleErrorSetType(kv.key); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = mod.intern_pool.getString(kv.key).unwrap().?, + } })).toValue()); } fn zirShl( @@ -12301,7 +12313,7 @@ fn zirShl( { const max_int = try sema.addConstant( lhs_ty, - try lhs_ty.maxInt(sema.arena, mod, lhs_ty), + try lhs_ty.maxInt(mod, lhs_ty), ); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); @@ -12316,7 +12328,7 @@ fn zirShl( if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { - const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); + const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ .tag = .reduce, @@ -12466,7 +12478,7 @@ fn zirShr( const bit_count_val = try mod.intValue(scalar_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { - const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); + const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ .tag = .reduce, @@ -13179,11 +13191,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs); } - const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) - else - try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); - + const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13203,11 +13211,7 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}), } - const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) - else - try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); - + const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13254,8 +13258,6 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13325,9 +13327,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13427,8 +13427,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13469,9 +13467,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13555,7 +13551,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else => unreachable, }; if (resolved_type.zigTypeTag(mod) == .Vector) { - const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); + const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ @@ -13600,8 +13596,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try 
sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13644,9 +13638,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13721,8 +13713,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13765,9 +13755,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13843,12 +13831,9 @@ fn addDivIntOverflowSafety( return; } - const min_int = try resolved_type.minInt(sema.arena, mod); + const min_int = try resolved_type.minInt(mod); const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); - const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector) - try Value.Tag.repeated.create(sema.arena, neg_one_scalar) - else - neg_one_scalar; + const neg_one = try sema.splat(resolved_type, neg_one_scalar); // If the LHS is comptime-known to be not equal to the min int, // no overflow is possible. @@ -13924,7 +13909,7 @@ fn addDivByZeroSafety( else try mod.floatValue(resolved_type.scalarType(mod), 0); const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); + const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ .tag = .reduce, @@ -14012,9 +13997,10 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{ + .ty = resolved_type.ip_index, + .storage = .{ .repeated_elem = scalar_zero.ip_index }, + } })).toValue() else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } else if (lhs_scalar_ty.isSignedInt(mod)) { @@ -14399,12 +14385,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the arguments is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14425,7 +14411,7 @@ fn zirOverflowArithmetic( if (rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; @@ -14444,9 +14430,9 @@ fn zirOverflowArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; - } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; + } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; } } } @@ -14454,9 +14440,9 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod)) { if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; - } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; + } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } } @@ -14478,12 +14464,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the arguments is undefined, both results are undefined.
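// Aside: every `sema.splat(dest_ty, zero)` above funnels through the new
// helper defined later in this patch: scalar types get the value back
// unchanged, vector types get an interned aggregate with one repeated
// element. A standalone analogue of that shape, with hypothetical types:
const std = @import("std");

const Ty = union(enum) { scalar, vector: u32 };
const Val = union(enum) { scalar: i64, repeated: i64 };

fn splat(ty: Ty, v: i64) Val {
    return switch (ty) {
        .scalar => .{ .scalar = v },
        // One stored element stands for every lane; no per-lane storage.
        .vector => .{ .repeated = v },
    };
}

test "splat only wraps vector operands" {
    try std.testing.expect(splat(.scalar, 0) == .scalar);
    try std.testing.expect(splat(.{ .vector = 4 }, 0) == .repeated);
}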
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14544,10 +14530,14 @@ fn zirOverflowArithmetic( return block.addAggregateInit(tuple_ty, element_refs); } -fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value { +fn splat(sema: *Sema, ty: Type, val: Value) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) != .Vector) return val; - return Value.Tag.repeated.create(sema.arena, val); + const repeated = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = val.ip_index }, + } }); + return repeated.toValue(); } fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { @@ -14603,8 +14593,6 @@ fn analyzeArithmetic( .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -14853,9 +14841,7 @@ fn analyzeArithmetic( } else if (resolved_type.isAnyFloat()) { break :lz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14886,9 +14872,7 @@ fn analyzeArithmetic( } else if (resolved_type.isAnyFloat()) { break :rz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14931,9 +14915,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14947,9 +14929,7 @@ fn analyzeArithmetic( return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14979,9 +14959,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b 
try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14994,9 +14972,7 @@ fn analyzeArithmetic( return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -15138,7 +15114,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(ptr_ty, offset_int, sema.mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -15184,7 +15160,7 @@ fn zirAsm( const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); const is_volatile = @truncate(u1, extended.small >> 15) != 0; - const is_global_assembly = sema.func == null; + const is_global_assembly = sema.func_index == .none; const asm_source: []const u8 = if (tmpl_is_expr) blk: { const tmpl = @intToEnum(Zir.Inst.Ref, extra.data.asm_source); @@ -15387,12 +15363,7 @@ fn zirCmpEq( if (lval.isUndef(mod) or rval.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, - // or calling to Module.getErrorValue to get the values and then compare them is - // faster. - const lhs_name = lval.castTag(.@"error").?.data.name; - const rhs_name = rval.castTag(.@"error").?.data.name; - if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { + if (lval.toIntern() == rval.toIntern()) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -15650,8 +15621,8 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .AnyFrame, => {}, } - const val = try ty.lazyAbiSize(mod, sema.arena); - if (val.isLazySize()) { + const val = try ty.lazyAbiSize(mod); + if (val.isLazySize(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -15760,11 +15731,11 @@ fn zirClosureGet( scope = scope.parent.?; }; - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) { + if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
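// Aside: the zirCmpEq hunk above is the central payoff of interning. The old
// code compared error *names* byte-by-byte (and left a TODO wondering which
// strategy was faster); once each distinct error value has exactly one
// InternPool index, equality is a single integer compare. Illustrative
// sketch with a hypothetical index type:
const std = @import("std");

const Index = enum(u32) { _ }; // stand-in for an InternPool index

// Interning guarantees structural equality iff index equality, so no string
// comparison is needed after the value is created.
fn errEql(a: Index, b: Index) bool {
    return a == b;
}

test "interned errors compare by index" {
    const out_of_memory = @intToEnum(Index, 1);
    const file_not_found = @intToEnum(Index, 2);
    try std.testing.expect(errEql(out_of_memory, out_of_memory));
    try std.testing.expect(!errEql(out_of_memory, file_not_found));
}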
log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15788,11 +15759,11 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { + if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15868,14 +15839,17 @@ fn zirBuiltinSrc( const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = std.mem.span(fn_owner_decl.name); + const name = mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + } }); }; const file_name_val = blk: { @@ -15888,27 +15862,35 @@ fn zirBuiltinSrc( try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + } }); }; - const field_values = try sema.arena.alloc(Value, 4); - // file: [:0]const u8, - field_values[0] = file_name_val; - // fn_name: [:0]const u8, - field_values[1] = func_name_val; - // line: u32 - field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try mod.intValue(Type.u32, extra.line + 1)); - // column: u32, - field_values[3] = try mod.intValue(Type.u32, extra.column + 1); - - return sema.addConstant( - try sema.getBuiltinType("SourceLocation"), - try Value.Tag.aggregate.create(sema.arena, field_values), - ); + const src_loc_ty = try sema.getBuiltinType("SourceLocation"); + const fields = .{ + // file: [:0]const u8, + file_name_val, + // fn_name: [:0]const u8, + func_name_val, + // line: u32, + try mod.intern(.{ .runtime_value = .{ + .ty = .u32_type, + .val = (try mod.intValue(Type.u32, extra.line + 1)).ip_index, + } }), + // column: u32, + (try mod.intValue(Type.u32, extra.column + 1)).ip_index, + }; + return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{ + .ty = src_loc_ty.ip_index, + .storage = .{ .elems = &fields }, + } })).toValue()); } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -15916,69 +15898,20 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const type_info_tag_ty = type_info_ty.unionTagType(mod).?; switch (ty.zigTypeTag(mod)) { - .Type => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, 
@enumToInt(std.builtin.TypeId.Type)), - .val = Value.void, - }), - ), - .Void => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Void)), - .val = Value.void, - }), - ), - .Bool => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)), - .val = Value.void, - }), - ), - .NoReturn => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.NoReturn)), - .val = Value.void, - }), - ), - .ComptimeFloat => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeFloat)), - .val = Value.void, - }), - ), - .ComptimeInt => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeInt)), - .val = Value.void, - }), - ), - .Undefined => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Undefined)), - .val = Value.void, - }), - ), - .Null => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Null)), - .val = Value.void, - }), - ), - .EnumLiteral => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.EnumLiteral)), - .val = Value.void, - }), - ), + .Type, + .Void, + .Bool, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .EnumLiteral, + => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).ip_index, + .val = .void_value, + } })).toValue()), .Fn => { // TODO: look into memoizing this result. 
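// Aside: nine structurally identical cases collapse above into one
// multi-prong switch arm. When a prong lists several values, a capture still
// binds the value that actually matched, so the shared body can use the
// specific tag (here via @enumToInt). A small demo of that language feature
// on its own:
const std = @import("std");

const TypeId = enum { Type, Void, Bool, Int, Float };

fn tagByte(id: TypeId) u8 {
    return switch (id) {
        // One arm, many cases; `tag` is whichever value matched.
        .Type, .Void, .Bool => |tag| @enumToInt(tag),
        .Int, .Float => 0xff,
    };
}

test "multi-prong switch capture" {
    try std.testing.expectEqual(@as(u8, @enumToInt(TypeId.Void)), tagByte(.Void));
    try std.testing.expectEqual(@as(u8, 0xff), tagByte(.Int));
}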
const info = mod.typeToFunc(ty).?; @@ -15986,11 +15919,34 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); - const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len); + const fn_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Fn", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); + try sema.ensureDeclAnalyzed(fn_info_decl_index); + const fn_info_decl = mod.declPtr(fn_info_decl_index); + const fn_info_ty = fn_info_decl.val.toType(); + + const param_info_decl_index = (try sema.namespaceLookup( + block, + src, + fn_info_ty.getNamespaceIndex(mod).unwrap().?, + "Param", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); + try sema.ensureDeclAnalyzed(param_info_decl_index); + const param_info_decl = mod.declPtr(param_info_decl_index); + const param_info_ty = param_info_decl.val.toType(); + + const param_vals = try gpa.alloc(InternPool.Index, info.param_types.len); + defer gpa.free(param_vals); for (param_vals, info.param_types, 0..) |*param_val, param_ty, i| { const is_generic = param_ty == .generic_poison_type; - const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ - .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), .val = if (is_generic) .none else param_ty, } }); @@ -15999,87 +15955,74 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :blk @truncate(u1, info.noalias_bits >> index) != 0; }; - const param_fields = try params_anon_decl.arena().create([3]Value); - param_fields.* = .{ + const param_fields = .{ // is_generic: bool, - Value.makeBool(is_generic), + Value.makeBool(is_generic).ip_index, // is_noalias: bool, - Value.makeBool(is_noalias), + Value.makeBool(is_noalias).ip_index, // type: ?type, - param_ty_val.toValue(), + param_ty_val, }; - param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields); + param_val.* = try mod.intern(.{ .aggregate = .{ + .ty = param_info_ty.ip_index, + .storage = .{ .elems = ¶m_fields }, + } }); } const args_val = v: { - const fn_info_decl_index = (try sema.namespaceLookup( - block, - src, - type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Fn", - )).?; - try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); - try sema.ensureDeclAnalyzed(fn_info_decl_index); - const fn_info_decl = mod.declPtr(fn_info_decl_index); - const fn_ty = fn_info_decl.val.toType(); - const param_info_decl_index = (try sema.namespaceLookup( - block, - src, - fn_ty.getNamespaceIndex(mod).unwrap().?, - "Param", - )).?; - try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); - try sema.ensureDeclAnalyzed(param_info_decl_index); - const param_info_decl = mod.declPtr(param_info_decl_index); - const param_ty = param_info_decl.val.toType(); + const args_slice_ty = try mod.ptrType(.{ + .elem_type = param_info_ty.ip_index, + .size = .Slice, + .is_const = true, + }); const new_decl = try params_anon_decl.finish( try mod.arrayType(.{ .len = param_vals.len, - .child = param_ty.ip_index, + .child = param_info_ty.ip_index, .sentinel = .none, }), - try Value.Tag.aggregate.create( - params_anon_decl.arena(), - param_vals, - ), + (try mod.intern(.{ .aggregate 
= .{ + .ty = args_slice_ty.ip_index, + .storage = .{ .elems = param_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, param_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = args_slice_ty.ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, param_vals.len)).ip_index, + } }); }; - const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ - .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + const ret_ty_opt = try mod.intern(.{ .opt = .{ + .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), .val = if (info.return_type == .generic_poison_type) .none else info.return_type, } }); const callconv_ty = try sema.getBuiltinType("CallingConvention"); - const field_values = try sema.arena.create([6]Value); - field_values.* = .{ + const field_values = .{ // calling_convention: CallingConvention, - try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc)), + (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)), + (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).ip_index, // is_generic: bool, - Value.makeBool(info.is_generic), + Value.makeBool(info.is_generic).ip_index, // is_var_args: bool, - Value.makeBool(info.is_var_args), + Value.makeBool(info.is_var_args).ip_index, // return_type: ?type, - ret_ty_opt.toValue(), + ret_ty_opt, // args: []const Fn.Param, args_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = fn_info_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Int => { const signedness_ty = try sema.getBuiltinType("Signedness"); @@ -16099,24 +16042,36 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Float => { - const field_values = try sema.arena.alloc(Value, 1); - // bits: u16, - field_values[0] = try mod.intValue(Type.u16, ty.bitSize(mod)); - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const float_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Float", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); + try sema.ensureDeclAnalyzed(float_info_decl_index); + const float_info_decl = mod.declPtr(float_info_decl_index); + const float_ty = float_info_decl.val.toType(); + + const field_vals = .{ + // bits: u16, + (try mod.intValue(Type.u16, ty.bitSize(mod))).ip_index, + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).ip_index, + .val = try 
mod.intern(.{ .aggregate = .{ + .ty = float_ty.ip_index, + .storage = .{ .elems = &field_vals }, + } }), + } })).toValue()); }, .Pointer => { const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) try mod.intValue(Type.comptime_int, info.@"align") else - try info.pointee_type.lazyAbiAlignment(mod, sema.arena); + try info.pointee_type.lazyAbiAlignment(mod); const addrspace_ty = try sema.getBuiltinType("AddressSpace"); const pointer_ty = t: { @@ -16245,9 +16200,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise - const error_field_vals: ?[]Value = if (ty.isAnyError(mod)) null else blk: { + const error_field_vals = if (ty.isAnyError(mod)) null else blk: { const names = ty.errorSetNames(mod); - const vals = try fields_anon_decl.arena().alloc(Value, names.len); + const vals = try gpa.alloc(InternPool.Index, names.len); + defer gpa.free(vals); for (vals, names) |*field_val, name_ip| { const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { @@ -16259,70 +16215,91 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const error_field_fields = try fields_anon_decl.arena().create([1]Value); - error_field_fields.* = .{ + const error_field_fields = .{ // name: []const u8, name_val, }; - - field_val.* = try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - error_field_fields, - ); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = error_field_ty.ip_index, + .storage = .{ .elems = &error_field_fields }, + } }); } break :blk vals; }; // Build our ?[]const Error value - const errors_val = if (error_field_vals) |vals| v: { + const slice_errors_ty = try mod.ptrType(.{ + .elem_type = error_field_ty.ip_index, + .size = .Slice, + .is_const = true, + }); + const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.ip_index); + const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { + const array_errors_ty = try mod.arrayType(.{ + .len = vals.len, + .child = error_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = vals.len, - .child = error_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - vals, - ), + array_errors_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_errors_ty.ip_index, + .storage = .{ .elems = vals }, + } })).toValue(), 0, // default alignment ); - - const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl); - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = new_decl_val, - .len = try mod.intValue(Type.usize, vals.len), - }); - break :v try Value.Tag.opt_payload.create(sema.arena, slice_val); - } else Value.null; + break :v try mod.intern(.{ .ptr = .{ + .ty = slice_errors_ty.ip_index, + .addr = .{ .decl = new_decl }, + } }); + } else .none; + const errors_val = try mod.intern(.{ .opt = .{ + .ty = opt_slice_errors_ty.ip_index, + .val = errors_payload_val, + } }); // Construct Type{ .ErrorSet = errors_val } - return sema.addConstant( - type_info_ty, - try 
Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet)), - .val = errors_val, - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).ip_index, + .val = errors_val, + } })).toValue()); }, .ErrorUnion => { - const field_values = try sema.arena.alloc(Value, 2); - // error_set: type, - field_values[0] = ty.errorUnionSet(mod).toValue(); - // payload: type, - field_values[1] = ty.errorUnionPayload(mod).toValue(); + const error_union_field_ty = t: { + const error_union_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "ErrorUnion", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index); + try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); + const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); + break :t error_union_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // error_set: type, + ty.errorUnionSet(mod).ip_index, + // payload: type, + ty.errorUnionPayload(mod).ip_index, + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = error_union_field_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Enum => { // TODO: look into memoizing this result. @@ -16346,7 +16323,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t enum_field_ty_decl.val.toType(); }; - const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len); + const enum_field_vals = try gpa.alloc(InternPool.Index, enum_type.names.len); + defer gpa.free(enum_field_vals); for (enum_field_vals, 0..) |*field_val, i| { const name_ip = enum_type.names[i]; @@ -16360,56 +16338,81 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const enum_field_fields = try fields_anon_decl.arena().create([2]Value); - enum_field_fields.* = .{ + const enum_field_fields = .{ // name: []const u8, name_val, // value: comptime_int, - try mod.intValue(Type.comptime_int, i), + (try mod.intValue(Type.comptime_int, i)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = enum_field_ty.ip_index, + .storage = .{ .elems = &enum_field_fields }, + } }); } const fields_val = v: { + const fields_array_ty = try mod.arrayType(.{ + .len = enum_field_vals.len, + .child = enum_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = enum_field_vals.len, - .child = enum_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - enum_field_vals, - ), + fields_array_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = fields_array_ty.ip_index, + .storage = .{ .elems = enum_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = enum_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, enum_type.namespace); - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const type_enum_ty = t: { + const type_enum_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Enum", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index); + try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); + const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); + break :t type_enum_ty_decl.val.toType(); + }; + + const field_values = .{ // tag_type: type, - enum_type.tag_ty.toValue(), + enum_type.tag_ty, // fields: []const EnumField, fields_val, // decls: []const Declaration, decls_val, // is_exhaustive: bool, - is_exhaustive, + is_exhaustive.ip_index, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_enum_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Union => { // TODO: look into memoizing this result. 
@@ -16417,6 +16420,19 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_union_ty = t: { + const type_union_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Union", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index); + try sema.ensureDeclAnalyzed(type_union_ty_decl_index); + const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); + break :t type_union_ty_decl.val.toType(); + }; + const union_field_ty = t: { const union_field_ty_decl_index = (try sema.namespaceLookup( block, @@ -16435,7 +16451,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const layout = union_ty.containerLayout(mod); const union_fields = union_ty.unionFields(mod); - const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count()); + const union_field_vals = try gpa.alloc(InternPool.Index, union_fields.count()); + defer gpa.free(union_field_vals); for (union_field_vals, 0..) |*field_val, i| { const field = union_fields.values()[i]; @@ -16449,51 +16466,62 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const union_field_fields = try fields_anon_decl.arena().create([3]Value); const alignment = switch (layout) { .Auto, .Extern => try sema.unionFieldAlignment(field), .Packed => 0, }; - union_field_fields.* = .{ + const union_field_fields = .{ // name: []const u8, name_val, // type: type, - field.ty.toValue(), + field.ty.ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, alignment), + (try mod.intValue(Type.comptime_int, alignment)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = union_field_ty.ip_index, + .storage = .{ .elems = &union_field_fields }, + } }); } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = union_field_vals.len, + .child = union_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = union_field_vals.len, - .child = union_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, union_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.ip_index, + .storage = .{ .elems = union_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, union_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = union_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, union_field_vals.len)).ip_index, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); - const enum_tag_ty_val = if 
(union_ty.unionTagType(mod)) |tag_ty| v: { - const ty_val = tag_ty.toValue(); - break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); - } else Value.null; + const enum_tag_ty_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).ip_index, + .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.ip_index else .none, + } }); const container_layout_ty = t: { const decl_index = (try sema.namespaceLookup( @@ -16508,10 +16536,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const field_values = .{ // layout: ContainerLayout, - try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, // tag_type: ?type, enum_tag_ty_val, @@ -16520,14 +16547,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_union_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Struct => { // TODO: look into memoizing this result. @@ -16535,6 +16562,19 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_struct_ty = t: { + const type_struct_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Struct", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index); + try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); + const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); + break :t type_struct_ty_decl.val.toType(); + }; + const struct_field_ty = t: { const struct_field_ty_decl_index = (try sema.namespaceLookup( block, @@ -16547,14 +16587,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); break :t struct_field_ty_decl.val.toType(); }; + const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout const layout = struct_ty.containerLayout(mod); - const struct_field_vals = fv: { + var struct_field_vals: []InternPool.Index = &.{}; + defer gpa.free(struct_field_vals); + fv: { const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { .anon_struct_type => |tuple| { - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, tuple.types.len); + struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); for ( tuple.types, tuple.values, @@ -16574,38 +16617,40 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes.ptr[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); const is_comptime = field_val != .none; const opt_default_val = if (is_comptime) field_val.toValue() else null; const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val); - struct_field_fields.* = .{ + const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - field_ty.toValue(), + field_ty, // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), + default_val_ptr.ip_index, // is_comptime: bool, - Value.makeBool(is_comptime), + Value.makeBool(is_comptime).ip_index, // alignment: comptime_int, - try field_ty.toType().lazyAbiAlignment(mod, fields_anon_decl.arena()), + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).ip_index, }; - struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + struct_field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.ip_index, + .storage = .{ .elems = &struct_field_fields }, + } }); } - break :fv struct_field_vals; + break :fv; }, .struct_type => |s| s, else => unreachable, }; - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse - break :fv &[0]Value{}; - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_obj.fields.count()); + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv; + struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count()); for ( struct_field_vals, @@ -16621,13 +16666,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); const opt_default_val = if (field.default_val.ip_index == .unreachable_value) null else @@ -16635,55 +16680,61 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); const alignment = field.alignment(mod, layout); - struct_field_fields.* = .{ + const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - field.ty.toValue(), + field.ty.ip_index, // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), + default_val_ptr.ip_index, // is_comptime: bool, - Value.makeBool(field.is_comptime), + Value.makeBool(field.is_comptime).ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, alignment), + (try mod.intValue(Type.comptime_int, alignment)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.ip_index, + .storage = .{ .elems = &struct_field_fields }, + } }); } - break :fv struct_field_vals; - }; + } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = struct_field_vals.len, + .child = struct_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = struct_field_vals.len, - .child = struct_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, struct_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.ip_index, + .storage = .{ .elems = struct_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, struct_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = struct_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, struct_field_vals.len)).ip_index, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); - const backing_integer_val = blk: { - if (layout == .Packed) { + const backing_integer_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).ip_index, + .val = if (layout == .Packed) val: { const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); - const backing_int_ty_val = struct_obj.backing_int_ty.toValue(); - break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); - } else { - break :blk Value.null; - } - }; + break :val struct_obj.backing_int_ty.ip_index; + } else .none, + } }); const container_layout_ty = t: { const decl_index = (try sema.namespaceLookup( @@ -16698,10 +16749,9 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([5]Value); - field_values.* = .{ + const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16709,36 +16759,48 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple(mod)), + Value.makeBool(struct_ty.isTuple(mod)).ip_index, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_struct_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Opaque => { // TODO: look into memoizing this result. + const type_opaque_ty = t: { + const type_opaque_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Opaque", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index); + try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); + const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); + break :t type_opaque_ty_decl.val.toType(); + }; + const opaque_ty = try sema.resolveTypeFields(ty); const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod)); - const field_values = try sema.arena.create([1]Value); - field_values.* = .{ + const field_values = .{ // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_opaque_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Frame => return sema.failWithUseOfAsync(block, src), .AnyFrame => return sema.failWithUseOfAsync(block, src), @@ -16751,7 +16813,7 @@ fn typeInfoDecls( src: LazySrcLoc, type_info_ty: Type, opt_namespace: Module.Namespace.OptionalIndex, -) CompileError!Value { +) CompileError!InternPool.Index { const mod = sema.mod; var decls_anon_decl = try block.startAnonDecl(); defer decls_anon_decl.deinit(); @@ -16770,7 +16832,7 @@ fn typeInfoDecls( }; try sema.queueFullTypeResolution(declaration_ty); - var decl_vals = std.ArrayList(Value).init(sema.gpa); + var decl_vals = std.ArrayList(InternPool.Index).init(sema.gpa); defer decl_vals.deinit(); var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa); @@ -16778,33 +16840,39 @@ fn typeInfoDecls( if 
(opt_namespace.unwrap()) |namespace_index| { const namespace = mod.namespacePtr(namespace_index); - try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), namespace, &decl_vals, &seen_namespaces); + try sema.typeInfoNamespaceDecls(block, namespace, declaration_ty, &decl_vals, &seen_namespaces); } + const array_decl_ty = try mod.arrayType(.{ + .len = decl_vals.items.len, + .child = declaration_ty.ip_index, + .sentinel = .none, + }); const new_decl = try decls_anon_decl.finish( - try mod.arrayType(.{ - .len = decl_vals.items.len, - .child = declaration_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - decls_anon_decl.arena(), - try decls_anon_decl.arena().dupe(Value, decl_vals.items), - ), + array_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.ip_index, + .storage = .{ .elems = decl_vals.items }, + } })).toValue(), 0, // default alignment ); - return try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, decl_vals.items.len), - }); + return try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = declaration_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, decl_vals.items.len)).ip_index, + } }); } fn typeInfoNamespaceDecls( sema: *Sema, block: *Block, - decls_anon_decl: Allocator, namespace: *Namespace, - decl_vals: *std.ArrayList(Value), + declaration_ty: Type, + decl_vals: *std.ArrayList(InternPool.Index), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { const mod = sema.mod; @@ -16817,7 +16885,7 @@ fn typeInfoNamespaceDecls( if (decl.analysis == .in_progress) continue; try mod.ensureDeclAnalyzed(decl_index); const new_ns = decl.val.toType().getNamespace(mod).?; - try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); + try sema.typeInfoNamespaceDecls(block, new_ns, declaration_ty, decl_vals, seen_namespaces); continue; } if (decl.kind != .named) continue; @@ -16830,20 +16898,23 @@ fn typeInfoNamespaceDecls( try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(decls_anon_decl, .{ - .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const fields = try decls_anon_decl.create([2]Value); - fields.* = .{ + const fields = .{ //name: []const u8, name_val, //is_pub: bool, - Value.makeBool(decl.is_pub), + Value.makeBool(decl.is_pub).ip_index, }; - try decl_vals.append(try Value.Tag.aggregate.create(decls_anon_decl, fields)); + try decl_vals.append(try mod.intern(.{ .aggregate = .{ + .ty = declaration_ty.ip_index, + .storage = .{ .elems = &fields }, + } })); } } @@ -17454,10 +17525,11 @@ fn zirRetErrValue( // Return the error code from the function. 
const kv = try mod.getErrorValue(err_name); - const result_inst = try sema.addConstant( - try mod.singleErrorSetType(err_name), - try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - ); + const error_set_type = try mod.singleErrorSetType(err_name); + const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + } })).toValue()); return sema.analyzeRet(block, result_inst, src); } @@ -17782,10 +17854,12 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. - if (val.castTag(.lazy_align)) |payload| { - if (payload.data.eql(elem_ty, sema.mod)) { - break :blk .none; - } + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.ip_index) break :blk .none, + else => {}, + }, + else => {}, } const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); @@ -17910,12 +17984,10 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } - if (obj_ty.sentinel(mod)) |sentinel| { - const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); - return sema.addConstant(obj_ty, val); - } else { - return sema.addConstant(obj_ty, Value.initTag(.empty_array)); - } + return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{ + .ty = obj_ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue()); } fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -18679,8 +18751,8 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (ty.isNoReturn(mod)) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } - const val = try ty.lazyAbiAlignment(mod, sema.arena); - if (val.isLazyAlign()) { + const val = try ty.lazyAbiAlignment(mod); + if (val.isLazyAlign(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -18704,7 +18776,8 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const bytes = val.castTag(.@"error").?.data.name; + const err_name = sema.mod.intern_pool.indexToKey(val.ip_index).err.name; + const bytes = sema.mod.intern_pool.stringToSlice(err_name); return sema.addStrLit(block, bytes); } @@ -18794,7 +18867,8 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const bytes = val.castTag(.enum_literal).?.data; + const tag_name = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const bytes = mod.intern_pool.stringToSlice(tag_name); return sema.addStrLit(block, bytes); }, .Enum => operand_ty, @@ -18883,11 +18957,8 @@ fn zirReify( .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { 
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const signedness_index = fields.getIndex("signedness").?; - const bits_index = fields.getIndex("bits").?; - - const signedness_val = try union_val.val.fieldValue(fields.values()[signedness_index].ty, mod, signedness_index); - const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index); + const signedness_val = try union_val.val.fieldValue(mod, fields.getIndex("signedness").?); + const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -18896,11 +18967,8 @@ fn zirReify( }, .Vector => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const len_index = fields.getIndex("len").?; - const child_index = fields.getIndex("child").?; - - const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); + const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); @@ -18915,9 +18983,7 @@ fn zirReify( }, .Float => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const bits_index = fields.getIndex("bits").?; - - const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index); + const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { @@ -18932,23 +18998,14 @@ fn zirReify( }, .Pointer => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const size_index = fields.getIndex("size").?; - const is_const_index = fields.getIndex("is_const").?; - const is_volatile_index = fields.getIndex("is_volatile").?; - const alignment_index = fields.getIndex("alignment").?; - const address_space_index = fields.getIndex("address_space").?; - const child_index = fields.getIndex("child").?; - const is_allowzero_index = fields.getIndex("is_allowzero").?; - const sentinel_index = fields.getIndex("sentinel").?; - - const size_val = try union_val.val.fieldValue(fields.values()[size_index].ty, mod, size_index); - const is_const_val = try union_val.val.fieldValue(fields.values()[is_const_index].ty, mod, is_const_index); - const is_volatile_val = try union_val.val.fieldValue(fields.values()[is_volatile_index].ty, mod, is_volatile_index); - const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index); - const address_space_val = try union_val.val.fieldValue(fields.values()[address_space_index].ty, mod, address_space_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); - const is_allowzero_val = try union_val.val.fieldValue(fields.values()[is_allowzero_index].ty, mod, is_allowzero_index); - const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index); + const size_val = try union_val.val.fieldValue(mod, fields.getIndex("size").?); + const is_const_val = try union_val.val.fieldValue(mod, fields.getIndex("is_const").?); + const is_volatile_val = try union_val.val.fieldValue(mod, 
fields.getIndex("is_volatile").?); + const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); + const address_space_val = try union_val.val.fieldValue(mod, fields.getIndex("address_space").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); + const is_allowzero_val = try union_val.val.fieldValue(mod, fields.getIndex("is_allowzero").?); + const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19032,22 +19089,18 @@ fn zirReify( }, .Array => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const len_index = fields.getIndex("len").?; - const child_index = fields.getIndex("child").?; - const sentinel_index = fields.getIndex("sentinel").?; - - const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); - const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index); + const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); + const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); const len = len_val.toUnsignedInt(mod); const child_ty = child_val.toType(); - const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { + const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, .pointee_type = child_ty, }); - break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?; + break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?; } else null; const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod); @@ -19055,9 +19108,7 @@ fn zirReify( }, .Optional => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const child_index = fields.getIndex("child").?; - - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const child_ty = child_val.toType(); @@ -19066,11 +19117,8 @@ fn zirReify( }, .ErrorUnion => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const error_set_index = fields.getIndex("error_set").?; - const payload_index = fields.getIndex("payload").?; - - const error_set_val = try union_val.val.fieldValue(fields.values()[error_set_index].ty, mod, error_set_index); - const payload_val = try union_val.val.fieldValue(fields.values()[payload_index].ty, mod, payload_index); + const error_set_val = try union_val.val.fieldValue(mod, fields.getIndex("error_set").?); + const payload_val = try union_val.val.fieldValue(mod, fields.getIndex("payload").?); const error_set_ty = error_set_val.toType(); const payload_ty = payload_val.toType(); @@ -19085,18 +19133,17 @@ fn zirReify( .ErrorSet => { const payload_val = union_val.val.optionalValue(mod) orelse return sema.addType(Type.anyerror); - const slice_val = payload_val.castTag(.slice).?.data; - const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); + const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod)); var names: Module.Fn.InferredErrorSet.NameMap = .{}; try 
names.ensureUnusedCapacity(sema.arena, len); for (0..len) |i| { - const elem_val = try slice_val.ptr.elemValue(mod, i); + const elem_val = try payload_val.elemValue(mod, i); const struct_val = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // error_set: type, const name_val = struct_val[0]; - const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str); const gop = names.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { @@ -19109,17 +19156,11 @@ fn zirReify( }, .Struct => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const layout_index = fields.getIndex("layout").?; - const backing_integer_index = fields.getIndex("backing_integer").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - const is_tuple_index = fields.getIndex("is_tuple").?; - - const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index); - const backing_integer_val = try union_val.val.fieldValue(fields.values()[backing_integer_index].ty, mod, backing_integer_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); - const is_tuple_val = try union_val.val.fieldValue(fields.values()[is_tuple_index].ty, mod, is_tuple_index); + const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); + const backing_integer_val = try union_val.val.fieldValue(mod, fields.getIndex("backing_integer").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); + const is_tuple_val = try union_val.val.fieldValue(mod, fields.getIndex("is_tuple").?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -19136,15 +19177,10 @@ fn zirReify( }, .Enum => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const tag_type_index = fields.getIndex("tag_type").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - const is_exhaustive_index = fields.getIndex("is_exhaustive").?; - - const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); - const is_exhaustive_val = try union_val.val.fieldValue(fields.values()[is_exhaustive_index].ty, mod, is_exhaustive_index); + const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); + const is_exhaustive_val = try union_val.val.fieldValue(mod, fields.getIndex("is_exhaustive").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19195,7 +19231,7 @@ fn zirReify( const value_val = field_struct_val[1]; const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, sema.arena, mod, ); @@ -19237,9 +19273,7 @@ fn 
zirReify( }, .Opaque => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const decls_index = fields.getIndex("decls").?; - - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19283,15 +19317,10 @@ fn zirReify( }, .Union => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const layout_index = fields.getIndex("layout").?; - const tag_type_index = fields.getIndex("tag_type").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - - const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index); - const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); + const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); + const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19386,7 +19415,7 @@ fn zirReify( const alignment_val = field_struct_val[2]; const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, new_decl_arena_allocator, mod, ); @@ -19489,19 +19518,12 @@ fn zirReify( }, .Fn => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const calling_convention_index = fields.getIndex("calling_convention").?; - const alignment_index = fields.getIndex("alignment").?; - const is_generic_index = fields.getIndex("is_generic").?; - const is_var_args_index = fields.getIndex("is_var_args").?; - const return_type_index = fields.getIndex("return_type").?; - const params_index = fields.getIndex("params").?; - - const calling_convention_val = try union_val.val.fieldValue(fields.values()[calling_convention_index].ty, mod, calling_convention_index); - const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index); - const is_generic_val = try union_val.val.fieldValue(fields.values()[is_generic_index].ty, mod, is_generic_index); - const is_var_args_val = try union_val.val.fieldValue(fields.values()[is_var_args_index].ty, mod, is_var_args_index); - const return_type_val = try union_val.val.fieldValue(fields.values()[return_type_index].ty, mod, return_type_index); - const params_val = try union_val.val.fieldValue(fields.values()[params_index].ty, mod, params_index); + const calling_convention_val = try union_val.val.fieldValue(mod, fields.getIndex("calling_convention").?); + const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); + const is_generic_val = try union_val.val.fieldValue(mod, fields.getIndex("is_generic").?); + const is_var_args_val = try union_val.val.fieldValue(mod, fields.getIndex("is_var_args").?); + const return_type_val = try union_val.val.fieldValue(mod, fields.getIndex("return_type").?); + const params_val = try union_val.val.fieldValue(mod, fields.getIndex("params").?); const is_generic = 
is_generic_val.toBool(mod); if (is_generic) { @@ -19528,14 +19550,12 @@ fn zirReify( const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); - const args_slice_val = params_val.castTag(.slice).?.data; - const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); - + const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod)); const param_types = try sema.arena.alloc(InternPool.Index, args_len); var noalias_bits: u32 = 0; for (param_types, 0..) |*param_type, i| { - const arg = try args_slice_val.ptr.elemValue(mod, i); + const arg = try params_val.elemValue(mod, i); const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // is_generic: bool, @@ -19676,7 +19696,7 @@ fn reifyStruct( } const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, new_decl_arena_allocator, mod, ); @@ -19707,7 +19727,7 @@ fn reifyStruct( } const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: { - const payload_val = if (opt_val.pointerDecl()) |opt_decl| + const payload_val = if (opt_val.pointerDecl(mod)) |opt_decl| mod.declPtr(opt_decl).val else opt_val; @@ -20137,7 +20157,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (maybe_operand_val) |val| { if (!dest_ty.isAnyError(mod)) { - const error_name = val.castTag(.@"error").?.data.name; + const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.ip_index).err.name); if (!dest_ty.errorSetHasField(error_name, mod)) { const msg = msg: { const msg = try sema.errMsg( @@ -20279,7 +20299,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { - return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val)); + return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ + .ty = dest_ty.ip_index, + .val = operand_val.toIntern(), + } })).toValue()); } return sema.addConstant(aligned_dest_ty, operand_val); } @@ -20944,7 +20967,7 @@ fn checkPtrIsNotComptimeMutable( operand_src: LazySrcLoc, ) CompileError!void { _ = operand_src; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(sema.mod)) { return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{}); } } @@ -20953,7 +20976,7 @@ fn checkComptimeVarStore( sema: *Sema, block: *Block, src: LazySrcLoc, - decl_ref_mut: Value.Payload.DeclRefMut.Data, + decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, ) CompileError!void { if (@enumToInt(decl_ref_mut.runtime_index) < @enumToInt(block.runtime_index)) { if (block.runtime_cond) |cond_src| { @@ -21159,7 +21182,7 @@ fn resolveExportOptions( const name_operand = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); - const name_ty = Type.const_slice_u8; + const name_ty = Type.slice_const_u8; const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod); const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); @@ -21168,7 +21191,7 @@ fn resolveExportOptions( const section_operand = try sema.fieldVal(block, src, options, 
"section", section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); - const section_ty = Type.const_slice_u8; + const section_ty = Type.slice_const_u8; const section = if (section_opt_val.optionalValue(mod)) |section_val| try section_val.toAllocatedBytes(section_ty, sema.arena, mod) else @@ -21298,12 +21321,14 @@ fn zirCmpxchg( } const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; - const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { - try sema.storePtr(block, src, ptr, new_value); - break :blk Value.null; - } else try Value.Tag.opt_payload.create(sema.arena, stored_val); - - return sema.addConstant(result_ty, result_val); + const result_val = try mod.intern(.{ .opt = .{ + .ty = result_ty.ip_index, + .val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { + try sema.storePtr(block, src, ptr, new_value); + break :blk .none; + } else stored_val.toIntern(), + } }); + return sema.addConstant(result_ty, result_val.toValue()); } else break :rs new_value_src; } else break :rs expected_src; } else ptr_src; @@ -21342,11 +21367,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty); - - return sema.addConstant( - vector_ty, - try Value.Tag.repeated.create(sema.arena, scalar_val), - ); + return sema.addConstant(vector_ty, try sema.splat(vector_ty, scalar_val)); } try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src); @@ -21800,7 +21821,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(mod)) { const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const new_val = switch (op) { @@ -22081,10 +22102,15 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { - const payload = field_ptr_val.castTag(.field_ptr) orelse { - return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{}); - }; - if (payload.data.field_index != field_index) { + const field = switch (mod.intern_pool.indexToKey(field_ptr_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .field => |field| field, + else => null, + }, + else => null, + } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{}); + + if (field.index != field_index) { const msg = msg: { const msg = try sema.errMsg( block, @@ -22093,7 +22119,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr .{ field_name, field_index, - payload.data.field_index, + field.index, parent_ty.fmt(sema.mod), }, ); @@ -22103,7 +22129,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr }; return sema.failWithOwnedErrorMsg(msg); } - return sema.addConstant(result_ptr, payload.data.container_ptr); + return sema.addConstant(result_ptr, field.base.toValue()); } try sema.requireRuntimeBlock(block, src, ptr_src); @@ -22335,13 
+22361,13 @@ fn analyzeMinMax( // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(sema.arena, mod), - .max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(mod), + .max => try comptime_elem_ty.minInt(mod), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), + .min => try comptime_elem_ty.maxInt(mod, Type.comptime_int), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(mod, Type.comptime_int), else => unreachable, }; @@ -22464,7 +22490,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { - if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!dest_ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); @@ -22618,7 +22644,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void return; } - if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| { for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22696,6 +22722,7 @@ fn zirVarExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 }; const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 }; @@ -22737,32 +22764,17 @@ fn zirVarExtended( try sema.validateVarType(block, ty_src, var_ty, small.is_extern); - const new_var = try sema.gpa.create(Module.Var); - errdefer sema.gpa.destroy(new_var); - - log.debug("created variable {*} owner_decl: {*} ({s})", .{ - new_var, sema.owner_decl, sema.owner_decl.name, - }); - - new_var.* = .{ - .owner_decl = sema.owner_decl_index, - .init = init_val, + return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{ + .ty = var_ty.ip_index, + .init = init_val.toIntern(), + .decl = sema.owner_decl_index, + .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString( + sema.gpa, + try sema.handleExternLibName(block, ty_src, lname), + )).toOptional() else .none, .is_extern = small.is_extern, - .is_mutable = true, .is_threadlocal = small.is_threadlocal, - .is_weak_linkage = false, - .lib_name = null, - }; - - if (lib_name) |lname| { - new_var.lib_name = try sema.handleExternLibName(block, ty_src, lname); - } - - const result = try sema.addConstant( - var_ty, - try Value.Tag.variable.create(sema.arena, new_var), - ); - return result; + } })).toValue()); } fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -22861,7 +22873,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; - const ty = Type.const_slice_u8; + const ty = Type.slice_const_u8; const val = try 
sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known"); if (val.isGenericPoison()) { break :blk FuncLinkSection{ .generic = {} }; @@ -23133,10 +23145,10 @@ fn resolveExternOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExternOptions { + const mod = sema.mod; const options_inst = try sema.resolveInst(zir_ref); const extern_options_ty = try sema.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); - const mod = sema.mod; const name_src = sema.maybeOptionsSrc(block, src, "name"); const library_src = sema.maybeOptionsSrc(block, src, "library"); @@ -23145,7 +23157,7 @@ fn resolveExternOptions( const name_ref = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known"); - const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src); const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known"); @@ -23157,9 +23169,8 @@ fn resolveExternOptions( const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); - const library_name = if (!library_name_val.isNull(mod)) blk: { - const payload = library_name_val.castTag(.opt_payload).?.data; - const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: { + const library_name = try payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); if (library_name.len == 0) { return sema.fail(block, library_src, "library name cannot be empty", .{}); } @@ -23227,40 +23238,36 @@ fn zirBuiltinExtern( new_decl.name = try sema.gpa.dupeZ(u8, options.name); { - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const new_var = try new_decl_arena_allocator.create(Module.Var); - new_var.* = .{ - .owner_decl = sema.owner_decl_index, - .init = Value.@"unreachable", + const new_var = try mod.intern(.{ .variable = .{ + .ty = ty.ip_index, + .init = .none, + .decl = sema.owner_decl_index, .is_extern = true, - .is_mutable = false, + .is_const = true, .is_threadlocal = options.is_thread_local, .is_weak_linkage = options.linkage == .Weak, - .lib_name = null, - }; + } }); new_decl.src_line = sema.owner_decl.src_line; // We only access this decl through the decl_ref with the correct type created // below, so this type doesn't matter - new_decl.ty = Type.anyopaque; - new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var); + new_decl.ty = ty; + new_decl.val = new_var.toValue(); new_decl.@"align" = 0; new_decl.@"linksection" = null; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; - - try new_decl.finalizeNewArena(&new_decl_arena); } try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); try sema.ensureDeclAnalyzed(new_decl_index); - const ref = try 
Value.Tag.decl_ref.create(sema.arena, new_decl_index); - return sema.addConstant(ty, ref); + const ref = try mod.intern(.{ .ptr = .{ + .ty = (try mod.singleConstPtrType(ty)).ip_index, + .addr = .{ .decl = new_decl_index }, + } }); + return sema.addConstant(ty, ref.toValue()); } fn zirWorkItem( @@ -24117,7 +24124,6 @@ fn fieldVal( const mod = sema.mod; const gpa = sema.gpa; - const arena = sema.arena; const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -24221,13 +24227,14 @@ fn fieldVal( else => unreachable, } - return sema.addConstant( - if (!child_type.isAnyError(mod)) - child_type - else - try mod.singleErrorSetTypeNts(name), - try Value.Tag.@"error".create(arena, .{ .name = ip.stringToSlice(name) }), - ); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetTypeNts(name); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = name, + } })).toValue()); }, .Union => { if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { @@ -24368,14 +24375,13 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.ptr_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.ip_index, + .addr = .{ .field = .{ + .base = val.ip_index, + .index = Value.slice_ptr_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24389,14 +24395,13 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.len_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.ip_index, + .addr = .{ .field = .{ + .base = val.ip_index, + .index = Value.slice_len_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24442,14 +24447,16 @@ fn fieldPtr( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetTypeNts(name); return sema.analyzeDeclRef(try anon_decl.finish( - if (!child_type.isAnyError(mod)) - child_type - else - try mod.singleErrorSetTypeNts(name), - try Value.Tag.@"error".create(anon_decl.arena(), .{ - .name = ip.stringToSlice(name), - }), + error_set_type, + (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = name, + } })).toValue(), 0, // default alignment )); }, @@ -24714,14 +24721,13 @@ fn finishFieldCallBind( } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const pointer = try sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = container_ty, - .field_index = field_index, - }), - ); + const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = struct_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) }; } @@ -24901,22 +24907,22 @@ fn 
structFieldPtrByIndex( const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data); if (field.is_comptime) { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = field.ty, - .field_val = try field.default_val.copy(sema.arena), - }); - return sema.addConstant(ptr_field_ty, val); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = struct_ptr_ty.childType(mod), - .field_index = field_index, - }), - ); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = try struct_ptr_val.intern(struct_ptr_ty, mod), + .index = field_index, + } }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24955,7 +24961,7 @@ fn structFieldVal( if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { return sema.addConstant(field.ty, opv); } - return sema.addConstant(field.ty, try struct_val.fieldValue(field.ty, mod, field_index)); + return sema.addConstant(field.ty, try struct_val.fieldValue(mod, field_index)); } try sema.requireRuntimeBlock(block, src, null); @@ -24999,7 +25005,7 @@ fn tupleFieldIndex( field_name_src: LazySrcLoc, ) CompileError!u32 { const mod = sema.mod; - assert(!std.mem.eql(u8, field_name, "len")); + assert(!mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { if (field_index < tuple_ty.structFieldCount(mod)) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ @@ -25109,14 +25115,13 @@ fn unionFieldPtr( }, .Packed, .Extern => {}, } - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = union_ptr_val, - .container_ty = union_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = union_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -25267,7 +25272,7 @@ fn elemPtrOneLayerOnly( const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, mod); + const elem_ptr = try ptr_val.elemPtr(indexable_ty, index, mod); const result_ty = try sema.elemPtrType(indexable_ty, index); return sema.addConstant(result_ty, elem_ptr); }; @@ -25313,7 +25318,7 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, mod); + const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } @@ -25407,22 +25412,20 @@ fn tupleFieldPtr( }); if (try 
tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = field_ty, - .field_val = default_val, - }); - return sema.addConstant(ptr_field_ty, val); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .comptime_field = default_val.ip_index }, + } })).toValue()); } if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = tuple_ptr_val, - .container_ty = tuple_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = tuple_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } if (!init) { @@ -25463,7 +25466,7 @@ fn tupleField( if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); + return sema.addConstant(field_ty, try tuple_val.fieldValue(mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25575,7 +25578,7 @@ fn elemPtrArray( return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, mod); + const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } @@ -25631,7 +25634,7 @@ fn elemValSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } @@ -25691,7 +25694,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25851,7 +25854,7 @@ fn coerceExtra( // Function body to function pointer. if (inst_ty.zigTypeTag(mod) == .Fn) { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const fn_decl = fn_val.pointerDecl().?; + const fn_decl = fn_val.pointerDecl(mod).?; const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } @@ -26080,14 +26083,14 @@ fn coerceExtra( if (inst_child_ty.structFieldCount(mod) == 0) { // Optional slice is represented with a null pointer so // we use a dummy pointer value with the required alignment. 
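// The dummy is the required alignment itself: an explicit alignment if one was given, otherwise the pointee type's lazily computed ABI alignment, with a length of zero.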
- const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = if (dest_info.@"align" != 0) + return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{ + .ty = dest_ty.ip_index, + .addr = .{ .int = (if (dest_info.@"align" != 0) try mod.intValue(Type.usize, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), - .len = try mod.intValue(Type.usize, 0), - }); - return sema.addConstant(dest_ty, slice_val); + try dest_info.pointee_type.lazyAbiAlignment(mod)).ip_index }, + .len = (try mod.intValue(Type.usize, 0)).ip_index, + } })).toValue()); } // pointer to tuple to slice @@ -26255,7 +26258,8 @@ fn coerceExtra( .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const bytes = val.castTag(.enum_literal).?.data; + const string = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const bytes = mod.intern_pool.stringToSlice(string); const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { const msg = msg: { const msg = try sema.errMsg( @@ -26292,26 +26296,30 @@ fn coerceExtra( if (maybe_inst_val) |inst_val| { switch (inst_val.ip_index) { .undef => return sema.addConstUndef(dest_ty), - .none => switch (inst_val.tag()) { - .eu_payload => { - const payload = try sema.addConstant( - inst_ty.errorUnionPayload(mod), - inst_val.castTag(.eu_payload).?.data, - ); - return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) { - error.NotCoercible => break :eu, - else => |e| return e, - }; + else => switch (mod.intern_pool.indexToKey(inst_val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| { + const error_set_ty = inst_ty.errorUnionSet(mod); + const error_set_val = try sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{ + .ty = error_set_ty.ip_index, + .name = err_name, + } })).toValue()); + return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src); + }, + .payload => |payload| { + const payload_val = try sema.addConstant( + inst_ty.errorUnionPayload(mod), + payload.toValue(), + ); + return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) { + error.NotCoercible => break :eu, + else => |e| return e, + }; + }, }, - else => {}, + else => unreachable, }, - else => {}, } - const error_set = try sema.addConstant( - inst_ty.errorUnionSet(mod), - inst_val, - ); - return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src); } }, .ErrorSet => { @@ -27029,7 +27037,7 @@ fn coerceInMemoryAllowedErrorSets( }, } - if (dst_ies.func == sema.owner_func) { + if (dst_ies.func == sema.owner_func_index.unwrap()) { // We are trying to coerce an error set to the current function's // inferred error set. 
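// All errors from the source set are added into the current function's inferred error set, which makes the coercion acceptable.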
try dst_ies.addErrorSet(src_ty, ip, gpa); @@ -27323,7 +27331,7 @@ fn coerceVarArgParam( ), .Fn => blk: { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const fn_decl = fn_val.pointerDecl().?; + const fn_decl = fn_val.pointerDecl(mod).?; break :blk try sema.analyzeDeclRef(fn_decl); }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), @@ -27441,7 +27449,7 @@ fn storePtr2( try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(mod)) { try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); return; } else break :rs ptr_src; @@ -27593,7 +27601,7 @@ fn storePtrVal( } const ComptimePtrMutationKit = struct { - decl_ref_mut: Value.Payload.DeclRefMut.Data, + decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, pointee: union(enum) { /// The pointer type matches the actual comptime Value so a direct /// modification is possible. @@ -27619,12 +27627,12 @@ const ComptimePtrMutationKit = struct { decl_arena: std.heap.ArenaAllocator = undefined, fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); + const decl = mod.declPtr(self.decl_ref_mut.decl); return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); } fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); + const decl = mod.declPtr(self.decl_ref_mut.decl); decl.value_arena.?.release(&self.decl_arena); self.decl_arena = undefined; } @@ -27637,6 +27645,7 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { + if (true) unreachable; const mod = sema.mod; switch (ptr_val.tag()) { .decl_ref_mut => { @@ -28169,7 +28178,7 @@ fn beginComptimePtrMutation( }, } }, - .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already + .decl_ref => unreachable, // isComptimeMutablePtr has been checked already else => unreachable, } } @@ -28189,7 +28198,7 @@ fn beginComptimePtrMutationInner( const decl = mod.declPtr(decl_ref_mut.decl_index); var decl_arena: std.heap.ArenaAllocator = undefined; - const allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); + const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); decl_val.* = try decl_val.unintern(allocator, mod); @@ -28273,44 +28282,83 @@ fn beginComptimePtrLoad( const mod = sema.mod; const target = mod.getTarget(); - var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) { - .null_value => { - return sema.fail(block, src, "attempt to use null value", .{}); - }, - - .none => switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - => blk: { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, + var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl => blk: { + const decl_index = switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }; - const is_mutable = ptr_val.tag() == .decl_ref_mut; const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); - if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; + if (decl.getVariable(mod) != null) return 
error.RuntimeLoad; const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, - .is_mutable = is_mutable, + .is_mutable = false, .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, + .int => return error.RuntimeLoad, + .eu_payload, .opt_payload => |container_ptr| blk: { + const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); + const payload_ty = ptr.ty.toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); - .elem_ptr => blk: { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ty = elem_ptr.elem_ty; - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null); + // eu_payload and opt_payload never have a well-defined layout + if (deref.parent != null) { + deref.parent = null; + deref.ty_without_well_defined_layout = container_ty; + } + + if (deref.pointee) |*tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + const payload_val = switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}), + .payload => |payload| payload, + }, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => opt.val, + }, + else => unreachable, + }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val.toValue() }; + break :blk deref; + } + } + deref.pointee = null; + break :blk deref; + }, + .comptime_field => |comptime_field| blk: { + const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); + break :blk ComptimePtrLoadKit{ + .parent = null, + .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, + .is_mutable = false, + .ty_without_well_defined_layout = field_ty, + }; + }, + .elem => |elem_ptr| blk: { + const elem_ty = ptr.ty.toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); // This code assumes that elem_ptrs have been "flattened" in order for direct dereference // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { - assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, mod))); + switch (mod.intern_pool.indexToKey(elem_ptr.base)) { + .ptr => |base_ptr| switch (base_ptr.addr) { + .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), + else => {}, + }, + else => {}, } if (elem_ptr.index != 0) { @@ -28327,7 +28375,7 @@ fn beginComptimePtrLoad( } } - // If we're loading an elem_ptr that was derived from a different type + // If we're loading an elem that was derived from a different type // than the true type of the underlying decl, we cannot deref directly const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { const deref_elem_ty = deref.pointee.?.ty.childType(mod); @@ -28373,31 +28421,25 @@ fn beginComptimePtrLoad( }; break :blk deref; }, + .field => |field_ptr| blk: { + const field_index = @intCast(u32, field_ptr.index); + const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); - .slice => blk: { - const slice = ptr_val.castTag(.slice).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null); - }, - - .field_ptr => blk: { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); - - if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { - const struct_obj = mod.typeToStruct(field_ptr.container_ty); + if (container_ty.hasWellDefinedLayout(mod)) { + const struct_obj = mod.typeToStruct(container_ty); if (struct_obj != null and struct_obj.?.layout == .Packed) { // packed structs are not byte addressable deref.parent = null; } else if (deref.parent) |*parent| { // Update the byte offset (in-place) - try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod); + try sema.resolveTypeLayout(container_ty); + const field_offset = container_ty.structFieldOffset(field_index, mod); parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { deref.parent = null; - deref.ty_without_well_defined_layout = field_ptr.container_ty; + deref.ty_without_well_defined_layout = container_ty; } const tv = deref.pointee orelse { @@ -28405,294 +28447,40 @@ fn beginComptimePtrLoad( break :blk deref; }; const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; if (!coerce_in_mem_ok) { deref.pointee = null; break :blk deref; } - if (field_ptr.container_ty.isSlice(mod)) { - const slice_val = tv.val.castTag(.slice).?.data; + if (container_ty.isSlice(mod)) { deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = field_ptr.container_ty.slicePtrFieldType(mod), - .val = slice_val.ptr, + Value.slice_ptr_index => TypedValue{ + .ty = 
container_ty.slicePtrFieldType(mod), + .val = tv.val.slicePtr(mod), }, - Value.Payload.Slice.len_index => TypedValue{ + Value.slice_len_index => TypedValue{ .ty = Type.usize, - .val = slice_val.len, + .val = mod.intern_pool.indexToKey(tv.val.ip_index).ptr.len.toValue(), }, else => unreachable, }; } else { - const field_ty = field_ptr.container_ty.structFieldType(field_index, mod); + const field_ty = container_ty.structFieldType(field_index, mod); deref.pointee = TypedValue{ .ty = field_ty, - .val = try tv.val.fieldValue(tv.ty, mod, field_index), + .val = try tv.val.fieldValue(mod, field_index), }; } break :blk deref; }, - - .comptime_field_ptr => blk: { - const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data; - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val }, - .is_mutable = false, - .ty_without_well_defined_layout = comptime_field_ptr.field_ty, - }; - }, - - .opt_payload_ptr, - .eu_payload_ptr, - => blk: { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const payload_ty = switch (ptr_val.tag()) { - .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(mod), - .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod), - else => unreachable, - }; - var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); - - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = payload_ptr.container_ty; - } - - if (deref.pointee) |*tv| { - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); - }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; - }, - else => unreachable, - }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; - } - } - deref.pointee = null; - break :blk deref; - }, - .opt_payload => blk: { - const opt_payload = ptr_val.castTag(.opt_payload).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null); - }, - - .variable, - .extern_fn, - .function, - => return error.RuntimeLoad, - - else => unreachable, }, - else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { - .int => return error.RuntimeLoad, - .ptr => |ptr| switch (ptr.addr) { - .@"var", .int => return error.RuntimeLoad, - .decl, .mut_decl => blk: { - const decl_index = switch (ptr.addr) { - .decl => |decl| decl, - .mut_decl => |mut_decl| mut_decl.decl, - else => unreachable, - }; - const decl = mod.declPtr(decl_index); - const decl_tv = try decl.typedValue(); - if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; - - const layout_defined = decl.ty.hasWellDefinedLayout(mod); - break :blk ComptimePtrLoadKit{ - .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, - .pointee = decl_tv, - .is_mutable = false, - .ty_without_well_defined_layout = if (!layout_defined) 
decl.ty else null, - }; - }, - .eu_payload, .opt_payload => |container_ptr| blk: { - const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); - const payload_ty = ptr.ty.toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); - - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = container_ty; - } - - if (deref.pointee) |*tv| { - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); - }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; - }, - else => unreachable, - }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; - } - } - deref.pointee = null; - break :blk deref; - }, - .comptime_field => |comptime_field| blk: { - const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, - .is_mutable = false, - .ty_without_well_defined_layout = field_ty, - }; - }, - .elem => |elem_ptr| blk: { - const elem_ty = ptr.ty.toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); - - // This code assumes that elem_ptrs have been "flattened" in order for direct dereference - // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that - // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - switch (mod.intern_pool.indexToKey(elem_ptr.base)) { - .ptr => |base_ptr| switch (base_ptr.addr) { - .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), - else => {}, - }, - else => {}, - } - - if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout(mod)) { - if (deref.parent) |*parent| { - // Update the byte offset (in-place) - const elem_size = try sema.typeAbiSize(elem_ty); - const offset = parent.byte_offset + elem_size * elem_ptr.index; - parent.byte_offset = try sema.usizeCast(block, src, offset); - } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = elem_ty; - } - } - - // If we're loading an elem that was derived from a different type - // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { - const deref_elem_ty = deref.pointee.?.ty.childType(mod); - break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; - } else false; - if (!ty_matches) { - deref.pointee = null; - break :blk deref; - } - - var array_tv = deref.pointee.?; - const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); - if (maybe_array_ty) |load_ty| { - // It's possible that we're loading a [N]T, in which case we'd like to slice - // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { - const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); - deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ - .ty = try Type.array(sema.arena, N, null, elem_ty, mod), - .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N), - } else null; - break :blk deref; - } - } - - if (elem_ptr.index >= check_len) { - deref.pointee = null; - break :blk deref; - } - if (elem_ptr.index == check_len - 1) { - if (array_tv.ty.sentinel(mod)) |sent| { - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = sent, - }; - break :blk deref; - } - } - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = try array_tv.val.elemValue(mod, elem_ptr.index), - }; - break :blk deref; - }, - .field => |field_ptr| blk: { - const field_index = @intCast(u32, field_ptr.index); - const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); - - if (container_ty.hasWellDefinedLayout(mod)) { - const struct_obj = mod.typeToStruct(container_ty); - if (struct_obj != null and struct_obj.?.layout == .Packed) { - // packed structs are not byte addressable - deref.parent = null; - } else if (deref.parent) |*parent| { - // Update the byte offset (in-place) - try sema.resolveTypeLayout(container_ty); - const field_offset = container_ty.structFieldOffset(field_index, mod); - parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); - } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = container_ty; - } - - const tv = deref.pointee orelse { - deref.pointee = null; - break :blk deref; - }; - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, 
src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; - if (!coerce_in_mem_ok) { - deref.pointee = null; - break :blk deref; - } - - if (container_ty.isSlice(mod)) { - const slice_val = tv.val.castTag(.slice).?.data; - deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = container_ty.slicePtrFieldType(mod), - .val = slice_val.ptr, - }, - Value.Payload.Slice.len_index => TypedValue{ - .ty = Type.usize, - .val = slice_val.len, - }, - else => unreachable, - }; - } else { - const field_ty = container_ty.structFieldType(field_index, mod); - deref.pointee = TypedValue{ - .ty = field_ty, - .val = try tv.val.fieldValue(tv.ty, mod, field_index), - }; - } - break :blk deref; - }, - }, - else => unreachable, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => try sema.beginComptimePtrLoad(block, src, opt.val.toValue(), null), }, + else => unreachable, }; if (deref.pointee) |tv| { @@ -28853,7 +28641,7 @@ fn coerceCompatiblePtrs( } // The comptime Value representation is compatible with both types. return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced( - mod.gpa, + sema.gpa, try val.intern(inst_ty, mod), dest_ty.ip_index, )).toValue()); @@ -29538,7 +29326,7 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { }; } -fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { +fn ensureFuncBodyAnalyzed(sema: *Sema, func: Module.Fn.Index) CompileError!void { sema.mod.ensureFuncBodyAnalyzed(func) catch |err| { if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; @@ -29550,6 +29338,7 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { } fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { + const mod = sema.mod; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( @@ -29558,15 +29347,23 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { 0, // default alignment ); try sema.maybeQueueFuncBodyAnalysis(decl); - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl); - return try Value.Tag.decl_ref.create(sema.arena, decl); + try mod.declareDeclDependency(sema.owner_decl_index, decl); + const result = try mod.intern(.{ .ptr = .{ + .ty = (try mod.singleConstPtrType(ty)).ip_index, + .addr = .{ .decl = decl }, + } }); + return result.toValue(); } fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { + const mod = sema.mod; const val = opt_val orelse return Value.null; const ptr_val = try sema.refValue(block, ty, val); - const result = try Value.Tag.opt_payload.create(sema.arena, ptr_val); - return result; + const result = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).ip_index)).ip_index, + .val = ptr_val.ip_index, + } }); + return result.toValue(); } fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref { @@ -29587,10 +29384,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const ptr_ty = try mod.ptrType(.{ .elem_type = decl_tv.ty.ip_index, .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), - .is_const = if (decl_tv.val.castTag(.variable)) |payload| - !payload.data.is_mutable - else - false, + .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false, .address_space = 
decl.@"addrspace", }); if (analyze_fn_body) { @@ -29608,8 +29402,8 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { const tv = try decl.typedValue(); if (tv.ty.zigTypeTag(mod) != .Fn) return; if (!try sema.fnHasRuntimeBits(tv.ty)) return; - const func = tv.val.castTag(.function) orelse return; // undef or extern_fn - try mod.ensureFuncBodyAnalysisQueued(func.data); + const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap() orelse return; // undef or extern_fn + try mod.ensureFuncBodyAnalysisQueued(func_index); } fn analyzeRef( @@ -29622,14 +29416,12 @@ fn analyzeRef( if (try sema.resolveMaybeUndefVal(operand)) |val| { switch (val.ip_index) { - .none => switch (val.tag()) { - .extern_fn, .function => { - const decl_index = val.pointerDecl().?; - return sema.analyzeDeclRef(decl_index); - }, + .none => {}, + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), + .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), else => {}, }, - else => {}, } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -29854,7 +29646,7 @@ fn analyzeIsNonErrComptimeOnly( if (other_ies.errors.count() != 0) break :blk; } - if (ies.func == sema.owner_func) { + if (ies.func == sema.owner_func_index.unwrap()) { // We're checking the inferred errorset of the current function and none of // its child inferred error sets contained any errors meaning that any value // so far with this type can't contain errors either. @@ -29873,7 +29665,7 @@ fn analyzeIsNonErrComptimeOnly( if (err_union.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (err_union.getError() == null) { + if (err_union.getError(mod) == null) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -30137,7 +29929,7 @@ fn analyzeSlice( const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sema.arena, sentinel_index, sema.mod); + const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sentinel_index, sema.mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -30233,7 +30025,7 @@ fn analyzeSlice( if (!new_ptr_val.isUndef(mod)) { return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced( - mod.gpa, + sema.gpa, try new_ptr_val.intern(new_ptr_ty, mod), return_ty.ip_index, )).toValue()); @@ -30753,7 +30545,10 @@ fn wrapOptional( inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(inst)) |val| { - return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{ + .ty = dest_ty.ip_index, + .val = val.ip_index, + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30771,7 +30566,10 @@ fn wrapErrorUnionPayload( const dest_payload_ty = dest_ty.errorUnionPayload(mod); const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { - return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ + .ty = dest_ty.ip_index, + .val = .{ .payload = val.ip_index }, + } })).toValue()); } try 
sema.requireRuntimeBlock(block, inst_src, null); try sema.queueFullTypeResolution(dest_payload_ty); @@ -30794,27 +30592,20 @@ fn wrapErrorUnionSet( .anyerror_type => {}, else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) { .error_set_type => |error_set_type| ok: { - const expected_name = val.castTag(.@"error").?.data.name; - if (ip.getString(expected_name).unwrap()) |expected_name_interned| { - if (error_set_type.nameIndex(ip, expected_name_interned) != null) - break :ok; - } + const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .inferred_error_set_type => |ies_index| ok: { const ies = mod.inferredErrorSetPtr(ies_index); - const expected_name = val.castTag(.@"error").?.data.name; + const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. if (ies.is_anyerror) break :ok; - if (ip.getString(expected_name).unwrap()) |expected_name_interned| { - if (ies.errors.contains(expected_name_interned)) break :ok; - } - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { - break :ok; - } + if (ies.errors.contains(expected_name)) break :ok; + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, @@ -31462,43 +31253,33 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileErro /// to a type not having its layout resolved. fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { switch (val.ip_index) { - .none => switch (val.tag()) { - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return sema.resolveTypeLayout(ty); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return sema.resolveTypeLayout(ty); - }, - .comptime_field_ptr => { - const field_ptr = val.castTag(.comptime_field_ptr).?.data; - return sema.resolveLazyValue(field_ptr.field_val); - }, - .eu_payload, - .opt_payload, - => { - const sub_val = val.cast(Value.Payload.SubValue).?.data; - return sema.resolveLazyValue(sub_val); - }, - .@"union" => { - const union_val = val.castTag(.@"union").?.data; - return sema.resolveLazyValue(union_val.val); - }, - .aggregate => { - const aggregate = val.castTag(.aggregate).?.data; - for (aggregate) |elem_val| { - try sema.resolveLazyValue(elem_val); - } - }, - .slice => { - const slice = val.castTag(.slice).?.data; - try sema.resolveLazyValue(slice.ptr); - return sema.resolveLazyValue(slice.len); + .none => {}, + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), + }, + .ptr => |ptr| { + switch (ptr.addr) { + .decl, .mut_decl => {}, + .int => |int| try sema.resolveLazyValue(int.toValue()), + .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), + .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), + .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), + } + if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); + }, + .aggregate => |aggregate| switch (aggregate.storage) { + 
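// Byte storage can never hold lazy values; element storage is walked so that any nested lazy_align/lazy_size integers get resolved.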
.bytes => {}, + .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), + .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), + }, + .un => |un| { + try sema.resolveLazyValue(un.tag.toValue()); + try sema.resolveLazyValue(un.val.toValue()); }, - else => return, + else => {}, }, - else => return, } } @@ -31597,7 +31378,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { else blk: { const decl = mod.declPtr(struct_obj.owner_decl); var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); + const decl_arena_allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); }; @@ -31662,18 +31443,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - var sema: Sema = .{ - .mod = mod, - .gpa = gpa, - .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, - .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, - .func = null, - .fn_ret_ty = Type.void, - .owner_func = null, - }; + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none }; defer sema.deinit(); var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); @@ -31720,8 +31490,10 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -31974,16 +31746,23 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -32141,8 +31920,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, .anyerror_void_error_union_type, .generic_poison_type, .empty_struct_type, @@ -32288,18 +32067,19 @@ fn resolveInferredErrorSet( if (ies.is_resolved) return; - if (ies.func.state == .in_progress) { + const func = mod.funcPtr(ies.func); + if (func.state == .in_progress) { return sema.fail(block, src, "unable to resolve inferred error set", .{}); } // In order to ensure that all dependencies are properly added to the set, we // need to ensure that the function body of the inferred error set is analyzed.
// However, in the case of comptime/inline function calls with inferred error sets, - each call gets a new InferredErrorSet object, which points to the same - `*Module.Fn`. Not only is the function not relevant to the inferred error set + each call gets a new InferredErrorSet object, which contains the same + `Module.Fn.Index`. Not only is the function not relevant to the inferred error set in this case, it may be a generic function which would cause an assertion failure if we called `ensureFuncBodyAnalyzed` on it here. - const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl); + const ies_func_owner_decl = mod.declPtr(func.owner_decl); const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?; // if the ies is declared by an inline function with a generic return type, the return_type should be generic_poison, // because an inline function does not create a new declaration, and the ies has been filled in by analyzeCall, @@ -32414,8 +32194,10 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -32754,8 +32536,10 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -33111,7 +32895,7 @@ fn generateUnionTagTypeNumbered( const name = name: { const fqn = try union_obj.getFullyQualifiedName(mod); defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33160,7 +32944,7 @@ fn generateUnionTagTypeSimple( const name = name: { const fqn = try union_obj.getFullyQualifiedName(mod); defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33288,19 +33072,19 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .inferred_error_set_type, => null, - .array_type => |array_type| { - if (array_type.len == 0) - return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(array_type.child.toType())) != null) { - return Value.initTag(.the_only_possible_value); + inline .array_type, .vector_type => |seq_type| { + if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue(); + if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = opv.ip_index }, + } })).toValue(); } return null; }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return Value.initTag(.empty_array); - if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v; - return null; - }, .opt_type => |child| { if (child == .noreturn_type) { return try mod.nullValue(ty); }, @@ -33466,16 +33250,23 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) 
CompileError!?Value { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -33625,10 +33416,13 @@ fn analyzeComptimeAlloc( decl.@"align" = alignment; try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); - return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .runtime_index = block.runtime_index, - .decl_index = decl_index, - })); + return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_type.ip_index, + .addr = .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); } /// The places where a user can specify an address space attribute @@ -33969,16 +33763,23 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -34337,8 +34138,9 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { - if (ty.ip_index == .comptime_int_type) return true; const mod = sema.mod; + if (ty.ip_index == .comptime_int_type) return true; + const info = ty.intInfo(mod); switch (val.ip_index) { .undef, .zero, @@ -34346,40 +34148,8 @@ fn intFitsInType( .zero_u8, => return true, - .none => switch (val.tag()) { - .lazy_align => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); - // If it is u16 or bigger we know the alignment fits without resolving it. - if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .lazy_size => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); - // If it is u64 or bigger we know the size fits without resolving it. 
- if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - - .the_only_possible_value => { - assert(ty.intInfo(mod).bits == 0); - return true; - }, - - .decl_ref_mut, - .extern_fn, - .decl_ref, - .function, - .variable, - => { - const info = ty.intInfo(mod); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable, .extern_func, .func, .ptr => { const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { @@ -34387,27 +34157,51 @@ fn intFitsInType( .unsigned => info.bits >= ptr_bits, }; }, - - .aggregate => { - assert(ty.zigTypeTag(mod) == .Vector); - for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { - if (vector_index) |some| some.* = i; - return false; - } - } - return true; + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => { + var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return big_int.fitsInTwosComp(info.signedness, info.bits); + }, + .lazy_align => |lazy_ty| { + const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); + // If it is u16 or bigger we know the alignment fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiAlignment(lazy_ty.toType()); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, + .lazy_size => |lazy_ty| { + const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); + // If it is u64 or bigger we know the size fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiSize(lazy_ty.toType()); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, }, - - else => unreachable, - }, - - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| { - const info = ty.intInfo(mod); - var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return big_int.fitsInTwosComp(info.signedness, info.bits); + .aggregate => |aggregate| { + assert(ty.zigTypeTag(mod) == .Vector); + return switch (aggregate.storage) { + .bytes => |bytes| for (bytes, 0..) |byte, i| { + if (byte == 0) continue; + const actual_needed_bits = std.math.log2(byte) + 1 + @boolToInt(info.signedness == .signed); + if (info.bits >= actual_needed_bits) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + .elems, .repeated_elem => for (switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems, + .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem), + }, 0..) 
|elem, i| { + if (try sema.intFitsInType(elem.toValue(), ty.scalarType(mod), null)) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + }; }, else => unreachable, }, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 569c1430d5b6..2222c1060e42 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -102,248 +102,15 @@ pub fn print( return writer.writeAll(" }"); }, - .the_only_possible_value => return writer.writeAll("0"), - .lazy_align => { - const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(mod); - return writer.print("{d}", .{x}); - }, - .lazy_size => { - const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(mod); - return writer.print("{d}", .{x}); - }, - .function => return writer.print("(function '{s}')", .{ - mod.declPtr(val.castTag(.function).?.data.owner_decl).name, - }), - .extern_fn => return writer.writeAll("(extern function)"), - .variable => unreachable, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref mut '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (level == 0) { - return writer.writeAll("(comptime field ptr)"); - } - return print(.{ - .ty = payload.field_ty, - .val = payload.field_val, - }, writer, level - 1, mod); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { - try print(.{ - .ty = elem_ptr.elem_ty, - .val = elem_ptr.array_ptr, - }, writer, level - 1, mod); - } - return writer.print("[{}]", .{elem_ptr.index}); - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { - try print(.{ - .ty = field_ptr.container_ty, - .val = field_ptr.container_ptr, - }, writer, level - 1, mod); - } - - if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { - switch (mod.intern_pool.indexToKey(field_ptr.container_ty.ip_index)) { - .anon_struct_type => |anon_struct| { - if (anon_struct.names.len == 0) { - return writer.print(".@\"{d}\"", .{field_ptr.field_index}); - } - }, - else => {}, - } - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); - return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { - const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index]; - return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.isSlice(mod)) { - switch (field_ptr.field_index) { - Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), - Value.Payload.Slice.len_index => return writer.writeAll(".len"), - else => unreachable, - } - } - }, - .empty_array => return writer.writeAll(".{}"), - .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => 
{ const str_lit = val.castTag(.str_lit).?.data; const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); }, - .repeated => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - var i: u32 = 0; - try writer.writeAll(".{ "); - const elem_tv = TypedValue{ - .ty = ty.elemType2(mod), - .val = val.castTag(.repeated).?.data, - }; - const len = ty.arrayLen(mod); - const max_len = std.math.min(len, max_aggregate_items); - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - try print(elem_tv, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .empty_array_sentinel => { - if (level == 0) { - return writer.writeAll(".{ (sentinel) }"); - } - try writer.writeAll(".{ "); - try print(.{ - .ty = ty.elemType2(mod), - .val = ty.sentinel(mod).?, - }, writer, level - 1, mod); - return writer.writeAll(" }"); - }, - .slice => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - const payload = val.castTag(.slice).?.data; - const elem_ty = ty.elemType2(mod); - const len = payload.len.toUnsignedInt(mod); - - if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); - var buf: [max_string_len]u8 = undefined; - - var i: u32 = 0; - while (i < max_len) : (i += 1) { - const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic - }; - if (elem_val.isUndef(mod)) break :str; - buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; - } - - // TODO would be nice if this had a bit of unicode awareness. - const truncated = if (len > max_string_len) " (truncated)" else ""; - return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); - } - - try writer.writeAll(".{ "); - - const max_len = std.math.min(len, max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic - }; - try print(.{ - .ty = elem_ty, - .val = elem_val, - }, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(mod); - }, - .opt_payload => { - val = val.castTag(.opt_payload).?.data; - ty = ty.optionalChild(mod); - return print(.{ .ty = ty, .val = val }, writer, level, mod); - }, - .eu_payload_ptr => { - try writer.writeAll("&"); - if (level == 0) { - return writer.writeAll("(ptr)"); - } - - const data = val.castTag(.eu_payload_ptr).?.data; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = ty.toValue(), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - .opt_payload_ptr => { - if (level == 0) { - return writer.writeAll("&(ptr)"); - } - - const data = val.castTag(.opt_payload_ptr).?.data; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = ty.toValue(), - }, 
writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), - .runtime_value => return writer.writeAll("[runtime value]"), }, else => { const key = mod.intern_pool.indexToKey(val.ip_index); @@ -353,6 +120,12 @@ pub fn print( switch (key) { .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), + .lazy_align => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiAlignment(mod), + }), + .lazy_size => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiSize(mod), + }), }, .enum_tag => |enum_tag| { if (level == 0) { @@ -407,7 +180,7 @@ fn printAggregate( } try print(.{ .ty = ty.structFieldType(i, mod), - .val = try val.fieldValue(ty, mod, i), + .val = try val.fieldValue(mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount(mod) > max_aggregate_items) { @@ -424,7 +197,7 @@ fn printAggregate( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = try val.fieldValue(ty, mod, i); + const elem = try val.fieldValue(mod, i); if (elem.isUndef(mod)) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } @@ -441,7 +214,7 @@ fn printAggregate( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = try val.fieldValue(ty, mod, i), + .val = try val.fieldValue(mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/Zir.zig b/src/Zir.zig index 45a6fae90be4..3afff5ba6af2 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2108,8 +2108,8 @@ pub const Inst = struct { manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), - const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), - const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), + slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type), + slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index c9126747da04..faf158e2a43b 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -328,7 +328,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -339,6 +339,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -4311,9 +4312,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, 
modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -4353,10 +4352,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .blr, .data = .{ .reg = .x30 }, }); - } else if (func_value.castTag(.extern_fn)) |func_payload| { - const extern_fn = func_payload.data; - const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); - const lib_name = mem.sliceTo(extern_fn.lib_name, 0); + } else if (func_value.getExternFunc(mod)) |extern_func| { + const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.MachO)) |macho_file| { const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -4627,7 +4625,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index fa8646be430e..778662fe86f1 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -334,7 +334,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -345,6 +345,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -4291,9 +4292,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
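        // Hedged sketch of the dispatch below, using the Value accessors this
        // patch introduces (getFunction/getExternFunc replace the old
        // castTag(.function)/castTag(.extern_fn) payload casts):
        //     if (func_value.getFunction(mod)) |func| {
        //         // direct call: resolve the atom for func.owner_decl
        //     } else if (func_value.getExternFunc(mod)) |extern_func| {
        //         // extern call: import by decl name (+ optional lib name)
        //     }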
if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -4308,7 +4307,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier @tagName(self.target.cpu.arch), }); } - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (func_value.getExternFunc(mod)) |_| { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -4573,7 +4572,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index faa2b2b7d034..a9cd130fa89a 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -217,7 +217,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -228,6 +228,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -1745,8 +1746,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); @@ -1760,7 +1760,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .imm12 = 0, } }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -1879,7 +1879,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 13f129f87b49..dc086dc00f02 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -260,7 +260,7 @@ const BigTomb = struct { pub fn 
generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -271,6 +271,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -1346,8 +1347,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // on linking. if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -1374,7 +1374,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .nop, .data = .{ .nop = {} }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -1663,7 +1663,8 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2d7e4a858576..66c0399343cc 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1203,20 +1203,22 @@ fn genFunctype( pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: codegen.DebugInfoOutput, ) codegen.CodeGenError!codegen.Result { _ = src_loc; + const mod = bin_file.options.module.?; + const func = mod.funcPtr(func_index); var code_gen: CodeGen = .{ .gpa = bin_file.allocator, .air = air, .liveness = liveness, .code = code, .decl_index = func.owner_decl, - .decl = bin_file.options.module.?.declPtr(func.owner_decl), + .decl = mod.declPtr(func.owner_decl), .err_msg = undefined, .locals = .{}, .target = bin_file.options.target, @@ -2196,27 +2198,33 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const callee: ?Decl.Index = blk: { const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; - if (func_val.castTag(.function)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); - break :blk function.data.owner_decl; - } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = mod.declPtr(extern_fn.data.owner_decl); + if (func_val.getFunction(mod)) |function| { + _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); + break :blk function.owner_decl; + } else if (func_val.getExternFunc(mod)) |extern_func| { + const ext_decl = mod.declPtr(extern_func.decl); 
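            // (Assumed convention, hedged: `extern_func.lib_name` is now an
            // optional interned string, so the `stringToSliceUnwrap` call a
            // few lines below yields `?[]const u8`, with null standing in
            // for the old empty sentinel-terminated lib name.)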
const ext_info = mod.typeToFunc(ext_decl.ty).?; var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); - const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type); + const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( mem.sliceTo(ext_decl.name, 0), atom.getSymbolIndex().?, - ext_decl.getExternFn().?.lib_name, + mod.intern_pool.stringToSliceUnwrap(ext_decl.getExternFunc(mod).?.lib_name), type_index, ); - break :blk extern_fn.data.owner_decl; - } else if (func_val.castTag(.decl_ref)) |decl_ref| { - _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data); - break :blk decl_ref.data; + break :blk extern_func.decl; + } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| { + _ = try func.bin_file.getOrCreateAtomForDecl(decl); + break :blk decl; + }, + else => {}, + }, + else => {}, } return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()}); }; @@ -2932,29 +2940,41 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return WValue{ .stack = {} }; } -fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { +fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { const mod = func.bin_file.base.options.module.?; - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr; + switch (ptr.addr) { + .decl => |decl_index| { + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); + }, + .mut_decl => |mut_decl| { + const decl_index = mut_decl.decl; + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); }, - .decl_ref => { - const decl_index = ptr_val.castTag(.decl_ref).?.data; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .int, .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}), + .opt_payload => |base_ptr| { + return func.lowerParentPtr(base_ptr.toValue()); }, - .variable => { - const decl_index = ptr_val.castTag(.variable).?.data.owner_decl; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .comptime_field => unreachable, + .elem => |elem| { + const index = elem.index; + const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); + const offset = index * elem_type.abiSize(mod); + const array_ptr = try func.lowerParentPtr(elem.base.toValue()); + + return WValue{ .memory_offset = .{ + .pointer = array_ptr.memory, + .offset = @intCast(u32, offset), + } }; }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_ty = field_ptr.container_ty; + .field => |field| { + const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); + const parent_ptr = try func.lowerParentPtr(field.base.toValue()); - const field_offset = switch (parent_ty.zigTypeTag(mod)) { + const offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout(mod)) { - .Packed => 
parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod), - else => parent_ty.structFieldOffset(field_ptr.field_index, mod), + .Packed => parent_ty.packedStructFieldByteOffset(field.index, mod), + else => parent_ty.structFieldOffset(field.index, mod), }, .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, @@ -2964,12 +2984,12 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - const field_offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); - break :blk field_offset; + const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk offset; }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { - .Slice => switch (field_ptr.field_index) { + .Slice => switch (field.index) { 0 => 0, 1 => func.ptrSize(), else => unreachable, @@ -2978,19 +2998,23 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue }, else => unreachable, }; - return func.lowerParentPtr(field_ptr.container_ptr, offset + @intCast(u32, field_offset)); - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const index = elem_ptr.index; - const elem_offset = index * elem_ptr.elem_ty.abiSize(mod); - return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); - }, - .opt_payload_ptr => { - const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - return func.lowerParentPtr(payload_ptr.container_ptr, offset); + + return switch (parent_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, + }, + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = @intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; }, - else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}), } } @@ -3045,21 +3069,97 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } if (val.isUndefDeep(mod)) return func.emitUndefined(ty); - if (val.castTag(.decl_ref)) |decl_ref| { - const decl_index = decl_ref.data; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - if (val.castTag(.decl_ref_mut)) |decl_ref_mut| { - const decl_index = decl_ref_mut.data.decl_index; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - switch (ty.zigTypeTag(mod)) { - .Void => return WValue{ .none = {} }, - .Int => { + + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { + .Array => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .Struct => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, 
struct_obj.backing_int_ty); + }, + .Vector => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .Frame, + .AnyFrame, + => return func.fail("Wasm TODO: LowerConstant for type {}", .{ty.fmt(mod)}), + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => return WValue{ .imm32 = switch (simple_value) { + .false => 0, + .true => 1, + else => unreachable, + } }, + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => { const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { @@ -3080,86 +3180,71 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, } }, - .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, - .Float => switch (ty.floatBits(func.target)) { - 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16, mod)) }, - 32 => return WValue{ .float32 = val.toFloat(f32, mod) }, - 64 => return WValue{ .float64 = val.toFloat(f64, mod) }, - else => unreachable, - }, - .Pointer => return switch (val.ip_index) { - .null_value => WValue{ .imm32 = 0 }, - .none => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => func.lowerParentPtr(val, 0), - else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| WValue{ .imm32 = @intCast(u32, int.storage.u64) }, - else => unreachable, - }, - }, - .Enum => { - const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; - const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); - return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); - }, - .ErrorSet => switch (val.tag()) { - .@"error" => { - const kv = try func.bin_file.base.options.module.?.getErrorValue(val.getError().?); - return WValue{ .imm32 = kv.value }; - }, - else => return WValue{ .imm32 = 0 }, + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + return WValue{ .imm32 = kv.value }; }, - .ErrorUnion => { + .error_union => { const error_type = ty.errorUnionSet(mod); const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
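            // (Sketch of the zero-bit-payload case: for e.g. `anyerror!void`
            // the constant is just the error value itself, with an integer 0
            // of the error type standing in for "no error", so it can be
            // lowered exactly like a bare error-set constant.)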
- const is_pl = val.errorUnionIsPayload(); + const is_pl = val.errorUnionIsPayload(mod); const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, - .Optional => if (ty.optionalReprIsPayload(mod)) { + .enum_tag => |enum_tag| { + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); + }, + .float => |float| switch (float.storage) { + .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) }, + .f32 => |f32_val| return WValue{ .float32 = f32_val }, + .f64 => |f64_val| return WValue{ .float64 = f64_val }, + else => unreachable, + }, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0), + .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0), + .int => |int| return func.lowerConstant(int.toValue(), mod.intern_pool.typeOf(int).toType()), + .opt_payload, .elem, .field => return func.lowerParentPtr(val), + else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}), + }, + .opt => if (ty.optionalReprIsPayload(mod)) { const pl_ty = ty.optionalChild(mod); - if (val.castTag(.opt_payload)) |payload| { - return func.lowerConstant(payload.data, pl_ty); - } else if (val.isNull(mod)) { - return WValue{ .imm32 = 0 }; + if (val.optionalValue(mod)) |payload| { + return func.lowerConstant(payload, pl_ty); } else { - return func.lowerConstant(val, pl_ty); + return WValue{ .imm32 = 0 }; } } else { - const is_pl = val.tag() == .opt_payload; - return WValue{ .imm32 = @boolToInt(is_pl) }; - }, - .Struct => { - const struct_obj = mod.typeToStruct(ty).?; - assert(struct_obj.layout == .Packed); - var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer - val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; - const int_val = try mod.intValue( - struct_obj.backing_int_ty, - std.mem.readIntLittle(u64, &buf), - ); - return func.lowerConstant(int_val, struct_obj.backing_int_ty); + return WValue{ .imm32 = @boolToInt(!val.isNull(mod)) }; }, - .Vector => { - assert(determineSimdStoreStrategy(ty, mod) == .direct); - var buf: [16]u8 = undefined; - val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; - return func.storeSimdImmd(buf); - }, - .Union => { - // in this case we have a packed union which will not be passed by reference. 
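            // (A packed union stores no tag at runtime; the removed code
            // below lowered the active field's value directly. Under the
            // InternPool such a constant arrives as a `.un` key, which the
            // rewritten switch further down still reports as a Wasm TODO.)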
- const union_ty = mod.typeToUnion(ty).?; - const union_obj = val.castTag(.@"union").?.data; - const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?; - const field_ty = union_ty.fields.values()[field_index].ty; - return func.lowerConstant(union_obj.val, field_ty); + .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .vector_type => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .struct_type, .anon_struct_type => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, struct_obj.backing_int_ty); + }, + else => unreachable, }, - else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), } } @@ -3221,31 +3306,33 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { .bool_true => return 1, .bool_false => return 0, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int), - .int => |int| intStorageAsI32(int.storage), - .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int), + .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), + .int => |int| intStorageAsI32(int.storage, mod), + .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), else => unreachable, }, } switch (ty.zigTypeTag(mod)) { .ErrorSet => { - const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function + const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError(mod).?) catch unreachable; // passed invalid `Value` to function return @bitCast(i32, kv.value); }, else => unreachable, // Programmer called this function for an illegal type } } -fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index) i32 { - return intStorageAsI32(ip.indexToKey(int).int.storage); +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage, mod); } -fn intStorageAsI32(storage: InternPool.Key.Int.Storage) i32 { +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { return switch (storage) { .i64 => |x| @intCast(i32, x), .u64 => |x| @bitCast(i32, @intCast(u32, x)), .big_int => unreachable, + .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))), }; } @@ -5514,7 +5601,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // As the names are global and the slice elements are constant, we do not have // to make a copy of the ptr+value but can point towards them directly. 
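    // (Assumed layout, hedged: one `{ ptr, len }` slice entry per error
    // value, so the name for error `e` is read from
    // `error_table_symbol + e * abi_size`, where `abi_size` is the size of
    // `[:0]const u8` computed below.)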
const error_table_symbol = try func.bin_file.getErrorTableSymbol(); - const name_ty = Type.const_slice_u8_sentinel_0; + const name_ty = Type.slice_const_u8_sentinel_0; const mod = func.bin_file.base.options.module.?; const abi_size = name_ty.abiSize(mod); @@ -6935,7 +7022,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // finish function body try writer.writeByte(std.wasm.opcode(.end)); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 77b4e6d42509..4a5532a23954 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -632,7 +632,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -643,6 +643,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -687,7 +688,7 @@ pub fn generate( @enumToInt(FrameIndex.stack_frame), FrameAlloc.init(.{ .size = 0, - .alignment = if (mod.align_stack_fns.get(module_fn)) |set_align_stack| + .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack| set_align_stack.alignment else 1, @@ -2760,19 +2761,18 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = src_ty.childType(mod); const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); - var splat_pl = Value.Payload.SubValue{ - .base = .{ .tag = .repeated }, - .data = mask_val, - }; - const splat_val = Value.initPayload(&splat_pl.base); - - const full_ty = try mod.vectorType(.{ + const splat_ty = try mod.vectorType(.{ .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), .child = elem_ty.ip_index, }); - const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); + const splat_abi_size = @intCast(u32, splat_ty.abiSize(mod)); + + const splat_val = try mod.intern(.{ .aggregate = .{ + .ty = splat_ty.ip_index, + .storage = .{ .repeated_elem = mask_val.ip_index }, + } }); - const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); + const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = splat_val.toValue() }); const splat_addr_mcv: MCValue = switch (splat_mcv) { .memory, .indirect, .load_frame => splat_mcv.address(), else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) }, @@ -2784,14 +2784,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { .{ .vp_, .@"and" }, dst_reg, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg); } else { try self.asmRegisterMemory( .{ .p_, .@"and" }, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegister(mir_tag, dst_reg, dst_reg); } @@ -4893,23 +4893,14 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { 
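    // With interned values the sign mask needs no arena or stack-fallback
    // allocation anymore; a minimal sketch of the pattern used below:
    //     const sign_val = switch (tag) {
    //         .neg => try vec_ty.minInt(mod), // only the sign bits set
    //         .fabs => try vec_ty.maxInt(mod, vec_ty), // all bits but the signs
    //         else => unreachable,
    //     };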
const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - var arena = std.heap.ArenaAllocator.init(self.gpa); - defer arena.deinit(); - - const ExpectedContents = struct { - repeated: Value.Payload.SubValue, - }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - const vec_ty = try mod.vectorType(.{ .len = @divExact(abi_size * 8, scalar_bits), .child = (try mod.intType(.signed, scalar_bits)).ip_index, }); const sign_val = switch (tag) { - .neg => try vec_ty.minInt(stack.get(), mod), - .fabs => try vec_ty.maxInt(stack.get(), mod, vec_ty), + .neg => try vec_ty.minInt(mod), + .fabs => try vec_ty.maxInt(mod, vec_ty), else => unreachable, }; @@ -8106,13 +8097,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. if (try self.air.value(callee, mod)) |func_value| { - if (if (func_value.castTag(.function)) |func_payload| - func_payload.data.owner_decl - else if (func_value.castTag(.decl_ref)) |decl_ref_payload| - decl_ref_payload.data - else - null) |owner_decl| - { + const func_key = mod.intern_pool.indexToKey(func_value.ip_index); + if (switch (func_key) { + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => null, + }, + else => null, + }) |owner_decl| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(owner_decl); const atom = elf_file.getAtom(atom_index); @@ -8145,10 +8138,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .disp = @intCast(i32, fn_got_addr), })); } else unreachable; - } else if (func_value.castTag(.extern_fn)) |func_payload| { - const extern_fn = func_payload.data; - const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); - const lib_name = mem.sliceTo(extern_fn.lib_name, 0); + } else if (func_value.getExternFunc(mod)) |extern_func| { + const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try self.owner.getSymbolIndex(self); const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); @@ -8554,7 +8546,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .unreach, .{ .none, .none, .none }); diff --git a/src/codegen.zig b/src/codegen.zig index 775eb09ab048..b9b7dac90fc8 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -14,6 +14,7 @@ const Air = @import("Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("Compilation.zig"); const ErrorMsg = Module.ErrorMsg; +const InternPool = @import("InternPool.zig"); const Liveness = @import("Liveness.zig"); const Module = @import("Module.zig"); const Target = std.Target; @@ -66,7 +67,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( bin_file: *link.File, src_loc: Module.SrcLoc, - func: 
*Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -75,17 +76,17 @@ pub fn generateFunction( switch (bin_file.options.target.cpu.arch) { .arm, .armeb, - => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .aarch64, .aarch64_be, .aarch64_32, - => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .wasm32, .wasm64, - => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), else => unreachable, } } @@ -182,12 +183,13 @@ pub fn generateSymbol( const tracy = trace(@src()); defer tracy.end(); + const mod = bin_file.options.module.?; var typed_value = arg_tv; - if (arg_tv.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const mod = bin_file.options.module.?; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); @@ -199,35 +201,10 @@ pub fn generateSymbol( if (typed_value.val.isUndefDeep(mod)) { const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); - return Result.ok; + return .ok; } - switch (typed_value.ty.zigTypeTag(mod)) { - .Fn => { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol function pointers", - .{}, - ), - }; - }, - .Float => { - switch (typed_value.ty.floatBits(target)) { - 16 => writeFloat(f16, typed_value.val.toFloat(f16, mod), target, endian, try code.addManyAsArray(2)), - 32 => writeFloat(f32, typed_value.val.toFloat(f32, mod), target, endian, try code.addManyAsArray(4)), - 64 => writeFloat(f64, typed_value.val.toFloat(f64, mod), target, endian, try code.addManyAsArray(8)), - 80 => { - writeFloat(f80, typed_value.val.toFloat(f80, mod), target, endian, try code.addManyAsArray(10)); - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - try code.appendNTimes(0, abi_size - 10); - }, - 128 => writeFloat(f128, typed_value.val.toFloat(f128, mod), target, endian, try code.addManyAsArray(16)), - else => unreachable, - } - return Result.ok; - }, + if (typed_value.val.ip_index == .none) 
switch (typed_value.ty.zigTypeTag(mod)) { .Array => switch (typed_value.val.tag()) { .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; @@ -248,62 +225,6 @@ pub fn generateSymbol( } return Result.ok; }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); - for (elem_vals[0..len]) |elem_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - return Result.ok; - }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(mod); - const sentinel = typed_value.ty.sentinel(mod); - const len = typed_value.ty.arrayLen(mod); - - var index: u64 = 0; - while (index < len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - if (sentinel) |sentinel_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - return Result.ok; - }, - .empty_array_sentinel => { - const elem_ty = typed_value.ty.childType(mod); - const sentinel_val = typed_value.ty.sentinel(mod).?; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - return Result.ok; - }, else => return Result{ .fail = try ErrorMsg.create( bin_file.allocator, @@ -313,195 +234,6 @@ pub fn generateSymbol( ), }, }, - .Pointer => switch (typed_value.val.ip_index) { - .null_value => { - switch (target.ptrBitWidth()) { - 32 => { - mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); - if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 4); - }, - 64 => { - mem.writeInt(u64, try code.addManyAsArray(8), 0, endian); - if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 8); - }, - else => unreachable, - } - return Result.ok; - }, - .none => switch (typed_value.val.tag()) { - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => typed_value.val.castTag(.variable).?.data.owner_decl, - .decl_ref => typed_value.val.castTag(.decl_ref).?.data, - .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }, - code, - debug_output, - reloc_info, - ), - .slice => { - const slice = typed_value.val.castTag(.slice).?.data; - - // generate ptr - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = slice.ptr, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = slice.len, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - }, - .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( - bin_file, - src_loc, - typed_value, - typed_value.val, - code, - debug_output, - reloc_info, - ), - else 
=> return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, - ), - }, - }, - else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { - .int => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, - else => unreachable, - }, - }, - .Int => { - const info = typed_value.ty.intInfo(mod); - if (info.bits <= 8) { - const x: u8 = switch (info.signedness) { - .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(mod)), - .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(mod))), - }; - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - var bigint_buffer: Value.BigIntSpace = undefined; - const bigint = typed_value.val.toBigInt(&bigint_buffer, mod); - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - const start = code.items.len; - try code.resize(start + abi_size); - bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); - return Result.ok; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, typed_value.val.toUnsignedInt(mod)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, typed_value.val.toUnsignedInt(mod)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, typed_value.val.toSignedInt(mod)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, typed_value.val.toSignedInt(mod)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toSignedInt(mod); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Enum => { - const int_val = try typed_value.enumToInt(mod); - - const info = typed_value.ty.intInfo(mod); - if (info.bits <= 8) { - const x = @intCast(u8, int_val.toUnsignedInt(mod)); - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for big int enums ('{}')", - .{typed_value.ty.fmt(mod)}, - ), - }; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, int_val.toUnsignedInt(mod)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, int_val.toUnsignedInt(mod)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = int_val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, int_val.toSignedInt(mod)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, int_val.toSignedInt(mod)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = 
int_val.toSignedInt(mod); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Bool => { - const x: u8 = @boolToInt(typed_value.val.toBool(mod)); - try code.append(x); - return Result.ok; - }, .Struct => { if (typed_value.ty.containerLayout(mod) == .Packed) { const struct_obj = mod.typeToStruct(typed_value.ty).?; @@ -562,370 +294,497 @@ pub fn generateSymbol( return Result.ok; }, - .Union => { - const union_obj = typed_value.val.castTag(.@"union").?.data; - const layout = typed_value.ty.unionGetLayout(mod); + .Vector => switch (typed_value.val.tag()) { + .bytes => { + const bytes = typed_value.val.castTag(.bytes).?.data; + const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse + return error.Overflow; + try code.ensureUnusedCapacity(len + padding); + code.appendSliceAssumeCapacity(bytes[0..len]); + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + return Result.ok; + }, + .str_lit => { + const str_lit = typed_value.val.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse + return error.Overflow; + try code.ensureUnusedCapacity(str_lit.len + padding); + code.appendSliceAssumeCapacity(bytes); + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + return Result.ok; + }, + else => unreachable, + }, + .Frame, + .AnyFrame, + => return .{ .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO generateSymbol for type {}", + .{typed_value.ty.fmt(mod)}, + ) }, + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; - if (layout.payload_size == 0) { - return generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType(mod).?, - .val = union_obj.tag, - }, code, debug_output, reloc_info); + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try code.append(switch (simple_value) { + .false => 0, + .true => 1, + else => unreachable, + }), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => { + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + var space: Value.BigIntSpace = undefined; + const val = typed_value.val.toBigInt(&space, mod); + val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); + }, + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + try code.writer().writeInt(u16, @intCast(u16, kv.value), endian); + }, + .error_union => |error_union| { + const 
payload_ty = typed_value.ty.errorUnionPayload(mod); + + const err_val = switch (error_union.val) { + .err_name => |err_name| @intCast(u16, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value), + .payload => @as(u16, 0), + }; + + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + try code.writer().writeInt(u16, err_val, endian); + return .ok; } - // Check if we should store the tag first. - if (layout.tag_align >= layout.payload_align) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType(mod).?, - .val = union_obj.tag, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_align = typed_value.ty.abiAlignment(mod); + + // emit the error value first when its alignment is greater than the payload's + if (error_align > payload_align) { + try code.writer().writeInt(u16, err_val, endian); } - const union_ty = mod.typeToUnion(typed_value.ty).?; - const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; - assert(union_ty.haveFieldTypes()); - const field_ty = union_ty.fields.values()[field_index].ty; - if (!field_ty.hasRuntimeBits(mod)) { - try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); - } else { + // emit payload part of the error union + { + const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = union_obj.val, + .ty = payload_ty, + .val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, - .fail => |em| return Result{ .fail = em }, + .fail => |em| return .{ .fail = em }, } + const unpadded_end = code.items.len - begin; + const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } } - if (layout.tag_size > 0) { + // Payload alignment is at least the error set's, so emit the error value last + if (error_align <= payload_align) { + const begin = code.items.len; + try code.writer().writeInt(u16, err_val, endian); + const unpadded_end = code.items.len - begin; + const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + }, + .enum_tag => |enum_tag| { + const int_tag_ty = try typed_value.ty.intTagType(mod); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = int_tag_ty, + .val = (try mod.intern_pool.getCoerced(mod.gpa, enum_tag.int, int_tag_ty.ip_index)).toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + }, + .float => |float| switch (float.storage) { + .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(2)), + .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(4)), + .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)), + .f80 => |f80_val| { + writeFloat(f80, f80_val, target, endian, try
code.addManyAsArray(10)); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + try code.appendNTimes(0, abi_size - 10); + }, + .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)), + }, + .ptr => |ptr| { + // generate ptr + switch (try lowerParentPtr(bin_file, src_loc, switch (ptr.len) { + .none => typed_value.val, + else => typed_value.val.slicePtr(mod), + }.ip_index, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + if (ptr.len != .none) { + // generate len switch (try generateSymbol(bin_file, src_loc, .{ - .ty = union_ty.tag_ty, - .val = union_obj.tag, + .ty = Type.usize, + .val = ptr.len.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } - - if (layout.padding > 0) { - try code.writer().writeByteNTimes(0, layout.padding); - } - - return Result.ok; }, - .Optional => { + .opt => { const payload_type = typed_value.ty.optionalChild(mod); - const is_pl = !typed_value.val.isNull(mod); + const payload_val = typed_value.val.optionalValue(mod); const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - if (!payload_type.hasRuntimeBits(mod)) { - try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); - return Result.ok; - } - if (typed_value.ty.optionalReprIsPayload(mod)) { - if (typed_value.val.castTag(.opt_payload)) |payload| { + if (payload_val) |value| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, - .val = payload.data, + .val = value, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else if (!typed_value.val.isNull(mod)) { + } else { + try code.writer().writeByteNTimes(0, abi_size); + } + } else { + const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; + if (payload_type.hasRuntimeBits(mod)) { + const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.ip_index })).toValue(); switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, - .val = typed_value.val, + .val = value, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else { - try code.writer().writeByteNTimes(0, abi_size); } - - return Result.ok; + try code.writer().writeByte(@boolToInt(payload_val != null)); + try code.writer().writeByteNTimes(0, padding); } + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.ip_index)) { + .array_type => |array_type| { + var index: u64 = 0; + while (index < array_type.len) : (index += 1) { + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + }, + } + } - const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; - const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.undef; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = value, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - try 
code.writer().writeByte(@boolToInt(is_pl)); - try code.writer().writeByteNTimes(0, padding); + if (array_type.sentinel != .none) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = array_type.sentinel.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + } + }, + .vector_type => |vector_type| { + var index: u32 = 0; + while (index < vector_type.len) : (index += 1) { + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ + .ty = vector_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + }, + } + } - return Result.ok; + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, vector_type.child.toType().bitSize(mod) * vector_type.len, 8) catch |err| switch (err) { + error.DivisionByZero => unreachable, + else => |e| return e, + })) orelse return error.Overflow; + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + }, + .struct_type, .anon_struct_type => { + if (typed_value.ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(typed_value.ty).?; + const fields = struct_obj.fields.values(); + const field_vals = typed_value.val.castTag(.aggregate).?.data; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + const current_pos = code.items.len; + try code.resize(current_pos + abi_size); + var bits: u16 = 0; + + for (field_vals, 0..) |field_val, index| { + const field_ty = fields[index].ty; + // pointer may point to a decl which must be marked used + // but can also result in a relocation. Therefore we handle those separately. + if (field_ty.zigTypeTag(mod) == .Pointer) { + const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; + var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); + defer tmp_list.deinit(); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty, + .val = field_val, + }, &tmp_list, debug_output, reloc_info)) { + .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), + .fail => |em| return Result{ .fail = em }, + } + } else { + field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; + } + bits += @intCast(u16, field_ty.bitSize(mod)); + } + } else { + const struct_begin = code.items.len; + const field_vals = typed_value.val.castTag(.aggregate).?.data; + for (field_vals, 0..)
|field_val, index| { + const field_ty = typed_value.ty.structFieldType(index, mod); + if (!field_ty.hasRuntimeBits(mod)) continue; + + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty, + .val = field_val, + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + const unpadded_field_end = code.items.len - struct_begin; + + // Pad struct members if required + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); + const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + } + }, + else => unreachable, }, - .ErrorUnion => { - const error_ty = typed_value.ty.errorUnionSet(mod); - const payload_ty = typed_value.ty.errorUnionPayload(mod); - const is_payload = typed_value.val.errorUnionIsPayload(); + .un => |un| { + const layout = typed_value.ty.unionGetLayout(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val; + if (layout.payload_size == 0) { return generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = err_val, + .ty = typed_value.ty.unionTagType(mod).?, + .val = un.tag.toValue(), }, code, debug_output, reloc_info); } - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - const abi_align = typed_value.ty.abiAlignment(mod); - - // error value first when its type is larger than the error union's payload - if (error_align > payload_align) { + // Check if we should store the tag first. + if (layout.tag_align >= layout.payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, + .ty = typed_value.ty.unionTagType(mod).?, + .val = un.tag.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } - // emit payload part of the error union - { - const begin = code.items.len; - const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.undef; + const union_ty = mod.typeToUnion(typed_value.ty).?; + const field_index = typed_value.ty.unionTagFieldIndex(un.tag.toValue(), mod).?; + assert(union_ty.haveFieldTypes()); + const field_ty = union_ty.fields.values()[field_index].ty; + if (!field_ty.hasRuntimeBits(mod)) { + try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); + } else { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_ty, - .val = payload_val, + .ty = field_ty, + .val = un.val.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); - const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; + const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } } - // Payload size is larger than error set, so emit our error set last - if (error_align <= payload_align) { - const begin = code.items.len; + if (layout.tag_size > 0) { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, + .ty = 
union_ty.tag_ty, + .val = un.tag.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); - const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; - - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - return Result.ok; - }, - .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const name = typed_value.val.getError().?; - const kv = try bin_file.options.module.?.getErrorValue(name); - try code.writer().writeInt(u32, kv.value, endian); - }, - else => { - try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(mod))); - }, } - return Result.ok; }, - .Vector => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(len + padding); - code.appendSliceAssumeCapacity(bytes[0..len]); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { - error.DivisionByZero => unreachable, - else => |e| return e, - })) orelse return error.Overflow; - for (elem_vals[0..len]) |elem_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = typed_value.ty.arrayLen(mod); - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { - error.DivisionByZero => unreachable, - else => |e| return e, - })) orelse return error.Overflow; - var index: u64 = 0; - while (index < len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(str_lit.len + padding); - code.appendSliceAssumeCapacity(bytes); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - else => unreachable, - }, - else => |tag| return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for type '{s}'", - .{@tagName(tag)}, - ) }, } + return .ok; } fn lowerParentPtr( 
bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, - parent_ptr: Value, + parent_ptr: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { const mod = bin_file.options.module.?; - switch (parent_ptr.tag()) { - .field_ptr => { - const field_ptr = parent_ptr.castTag(.field_ptr).?.data; + const ptr = mod.intern_pool.indexToKey(parent_ptr).ptr; + assert(ptr.len == .none); + return switch (ptr.addr) { + .decl, .mut_decl => try lowerDeclRef( + bin_file, + src_loc, + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }, + code, + debug_output, + reloc_info, + ), + .int => |int| try generateSymbol(bin_file, src_loc, .{ + .ty = Type.usize, + .val = int.toValue(), + }, code, debug_output, reloc_info), + .eu_payload => |eu_payload| try lowerParentPtr( + bin_file, + src_loc, + eu_payload, + code, + debug_output, + reloc_info.offset(@intCast(u32, errUnionPayloadOffset( + mod.intern_pool.typeOf(eu_payload).toType(), + mod, + ))), + ), + .opt_payload => |opt_payload| try lowerParentPtr( + bin_file, + src_loc, + opt_payload, + code, + debug_output, + reloc_info, + ), + .elem => |elem| try lowerParentPtr( + bin_file, + src_loc, + elem.base, + code, + debug_output, + reloc_info.offset(@intCast(u32, elem.index * + mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), + ), + .field => |field| { + const base_type = mod.intern_pool.typeOf(field.base); return lowerParentPtr( bin_file, src_loc, - typed_value, - field_ptr.container_ptr, + field.base, code, debug_output, - reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { - .Pointer => offset: { - assert(field_ptr.container_ty.isSlice(mod)); - break :offset switch (field_ptr.field_index) { + reloc_info.offset(switch (mod.intern_pool.indexToKey(base_type)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .One, .Many, .C => unreachable, + .Slice => switch (field.index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(mod).abiSize(mod), + 1 => @divExact(mod.getTarget().ptrBitWidth(), 8), else => unreachable, - }; + }, }, - .Struct, .Union => field_ptr.container_ty.structFieldOffset( - field_ptr.field_index, + .struct_type, + .anon_struct_type, + .union_type, + => @intCast(u32, base_type.toType().childType(mod).structFieldOffset( + @intCast(u32, field.index), mod, - ), - else => return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement lowerParentPtr for field_ptr with a container of type {}", - .{field_ptr.container_ty.fmt(bin_file.options.module.?)}, - ) }, - })), - ); - }, - .elem_ptr => { - const elem_ptr = parent_ptr.castTag(.elem_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - elem_ptr.array_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(mod))), - ); - }, - .opt_payload_ptr => { - const opt_payload_ptr = parent_ptr.castTag(.opt_payload_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - opt_payload_ptr.container_ptr, - code, - debug_output, - reloc_info, - ); - }, - .eu_payload_ptr => { - const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data; - const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - eu_payload_ptr.container_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, 
mod))), + )), + else => unreachable, + }), ); }, - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => parent_ptr.castTag(.variable).?.data.owner_decl, - .decl_ref => parent_ptr.castTag(.decl_ref).?.data, - .decl_ref_mut => parent_ptr.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }, - code, - debug_output, - reloc_info, - ), - else => |tag| return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement lowerParentPtr for type '{s}'", - .{@tagName(tag)}, - ) }, - } + .comptime_field => unreachable, + }; } const RelocInfo = struct { @@ -940,36 +799,15 @@ const RelocInfo = struct { fn lowerDeclRef( bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, decl_index: Module.Decl.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { + _ = src_loc; + _ = debug_output; const target = bin_file.options.target; const mod = bin_file.options.module.?; - if (typed_value.ty.isSlice(mod)) { - // generate ptr - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = typed_value.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = try mod.intValue(Type.usize, typed_value.val.sliceLen(mod)), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - } const ptr_width = target.ptrBitWidth(); const decl = mod.declPtr(decl_index); @@ -1154,12 +992,13 @@ pub fn genTypedValue( arg_tv: TypedValue, owner_decl_index: Module.Decl.Index, ) CodeGenError!GenResult { + const mod = bin_file.options.module.?; var typed_value = arg_tv; - if (typed_value.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const mod = bin_file.options.module.?; log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), @@ -1171,17 +1010,14 @@ pub fn genTypedValue( const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); - if (!typed_value.ty.isSlice(mod)) { - if (typed_value.val.castTag(.variable)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl); - } - if (typed_value.val.castTag(.decl_ref)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data); - } - if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index); - } - } + if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return genDeclRef(bin_file, src_loc, typed_value, decl), + .mut_decl => |mut_decl| return genDeclRef(bin_file, src_loc, typed_value, mut_decl.decl), + else => {}, + }, + else => {}, + }; switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), @@ -1215,11 +1051,9 @@ pub fn genTypedValue( }, .Optional => { if (typed_value.ty.isPtrLikeOptional(mod)) { - if (typed_value.val.ip_index == .null_value) return GenResult.mcv(.{ .immediate = 0 }); - return 
genTypedValue(bin_file, src_loc, .{ .ty = typed_value.ty.optionalChild(mod), - .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, + .val = typed_value.val.optionalValue(mod) orelse return GenResult.mcv(.{ .immediate = 0 }), }, owner_decl_index); } else if (typed_value.ty.abiSize(mod) == 1) { return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) }); @@ -1234,24 +1068,15 @@ pub fn genTypedValue( }, owner_decl_index); }, .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return GenResult.mcv(.{ .immediate = error_index }); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. - return GenResult.mcv(.{ .immediate = 0 }); - }, - } + const err_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(typed_value.val.ip_index).err.name); + const global_error_set = mod.global_error_set; + const error_index = global_error_set.get(err_name).?; + return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(mod); const payload_type = typed_value.ty.errorUnionPayload(mod); - const is_pl = typed_value.val.errorUnionIsPayload(); + const is_pl = typed_value.val.errorUnionIsPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d3b8e06e5d0d..1bb8130b1fc2 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -257,7 +257,7 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { return .{ .data = ident }; } -/// This data is available when outputting .c code for a `*Module.Fn`. +/// This data is available when outputting .c code for a `Module.Fn.Index`. /// It is not available when generating .h file. pub const Function = struct { air: Air, @@ -268,7 +268,7 @@ pub const Function = struct { next_block_index: usize = 0, object: Object, lazy_fns: LazyFnMap, - func: *Module.Fn, + func_index: Module.Fn.Index, /// All the locals, to be emitted at the top of the function. locals: std.ArrayListUnmanaged(Local) = .{}, /// Which locals are available for reuse, based on Type. @@ -549,33 +549,12 @@ pub const DeclGen = struct { } // Chase function values in order to be able to reference the original function. 
- inline for (.{ .function, .extern_fn }) |tag| - if (decl.val.castTag(tag)) |func| - if (func.data.owner_decl != decl_index) - return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location); + if (decl.getFunction(mod)) |func| if (func.owner_decl != decl_index) + return dg.renderDeclValue(writer, ty, val, func.owner_decl, location); + if (decl.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index) + return dg.renderDeclValue(writer, ty, val, extern_func.decl, location); - if (decl.val.castTag(.variable)) |var_payload| - try dg.renderFwdDecl(decl_index, var_payload.data); - - if (ty.isSlice(mod)) { - if (location == .StaticInitializer) { - try writer.writeByte('{'); - } else { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeAll("){ .ptr = "); - } - - try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(mod), .Initializer); - - const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); - - if (location == .StaticInitializer) { - return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); - } else { - return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); - } - } + if (decl.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable); // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function @@ -594,125 +573,77 @@ pub const DeclGen = struct { /// Renders a "parent" pointer by recursing to the root decl/variable /// that its contents are defined with respect to. - /// - /// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr fn renderParentPtr( dg: *DeclGen, writer: anytype, - ptr_val: Value, - ptr_ty: Type, + ptr_val: InternPool.Index, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { const mod = dg.module; - - if (!ptr_ty.isSlice(mod)) { - try writer.writeByte('('); - try dg.renderType(writer, ptr_ty); - try writer.writeByte(')'); - } - if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { - .int => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}), - else => unreachable, - }; - switch (ptr_val.tag()) { - .decl_ref_mut, .decl_ref, .variable => { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, - .variable => ptr_val.castTag(.variable).?.data.owner_decl, + const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType(); + const ptr = mod.intern_pool.indexToKey(ptr_val).ptr; + switch (ptr.addr) { + .decl, .mut_decl => try dg.renderDeclValue( + writer, + ptr_ty, + ptr_val.toValue(), + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, - }; - try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); + }, + location, + ), + .int => |int| try writer.print("{x}", .{ + try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), + }), + .eu_payload, .opt_payload => |base| { + const base_ty = mod.intern_pool.typeOf(base).toType().childType(mod); + // Ensure complete type definition is visible before accessing fields. 
+ _ = try dg.typeToIndex(base_ty, .complete); + try writer.writeAll("&("); + try dg.renderParentPtr(writer, base, location); + try writer.writeAll(")->payload"); }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - + .elem => |elem| { + try writer.writeAll("&("); + try dg.renderParentPtr(writer, elem.base, location); + try writer.print(")[{d}]", .{elem.index}); + }, + .field => |field| { + const base_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); // Ensure complete type definition is visible before accessing fields. - _ = try dg.typeToIndex(field_ptr.container_ty, .complete); - - const container_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, field_ptr.container_ty); - - switch (fieldLocation( - field_ptr.container_ty, - ptr_ty, - @intCast(u32, field_ptr.field_index), - mod, - )) { - .begin => try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ), - .field => |field| { + _ = try dg.typeToIndex(base_ty, .complete); + switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) { + .begin => try dg.renderParentPtr(writer, field.base, location), + .field => |name| { try writer.writeAll("&("); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.writeAll(")->"); - try dg.writeCValue(writer, field); + try dg.writeCValue(writer, name); }, .byte_offset => |byte_offset| { const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8); - const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try dg.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.print(" + {})", .{ try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other), }); }, .end => { try writer.writeAll("(("); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.print(") + {})", .{ try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other), }); }, } }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ptr_ty = try mod.ptrType(.{ - .size = .C, - .elem_type = elem_ptr.elem_ty.ip_index, - }); - - try writer.writeAll("&("); - try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location); - try writer.print(")[{d}]", .{elem_ptr.index}); - }, - .opt_payload_ptr, .eu_payload_ptr => { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const container_ptr_ty = try mod.ptrType(.{ - .elem_type = payload_ptr.container_ty.ip_index, - .size = .C, - }); - - // Ensure complete type definition is visible before accessing fields. 
- _ = try dg.typeToIndex(payload_ptr.container_ty, .complete); - - try writer.writeAll("&("); - try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location); - try writer.writeAll(")->payload"); - }, - else => unreachable, + .comptime_field => unreachable, } } @@ -723,11 +654,12 @@ pub const DeclGen = struct { arg_val: Value, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { + const mod = dg.module; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } - const mod = dg.module; const target = mod.getTarget(); const initializer_type: ValueRenderLocation = switch (location) { .StaticInitializer => .StaticInitializer, @@ -928,175 +860,8 @@ pub const DeclGen = struct { } unreachable; } - switch (ty.zigTypeTag(mod)) { - .Int => switch (val.tag()) { - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), - else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), - }, - .Float => { - const bits = ty.floatBits(target); - const f128_val = val.toFloat(f128, mod); - - // All unsigned ints matching float types are pre-allocated. - const repr_ty = mod.intType(.unsigned, bits) catch unreachable; - - assert(bits <= 128); - var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; - var repr_val_big = BigInt.Mutable{ - .limbs = &repr_val_limbs, - .len = undefined, - .positive = undefined, - }; - switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), - 128 => repr_val_big.set(@bitCast(u128, f128_val)), - else => unreachable, - } - - const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); - - try writer.writeAll("zig_cast_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte(' '); - var empty = true; - if (std.math.isFinite(f128_val)) { - try writer.writeAll("zig_make_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), - 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), - 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), - 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), - 128 => try writer.print("{x}", .{f128_val}), - else => unreachable, - } - try writer.writeAll(", "); - empty = false; - } else { - // isSignalNan is equivalent to isNan currently, and MSVC doens't have nans, so prefer nan - const operation = if (std.math.isNan(f128_val)) - "nan" - else if (std.math.isSignalNan(f128_val)) - "nans" - else if (std.math.isInf(f128_val)) - "inf" - else - unreachable; - - if (location == .StaticInitializer) { - if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val)) - return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{}); - - // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression - - // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly - // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128) - // return dg.fail("Only quiet nans are supported in global variable initializers", 
.{}); - } - - try writer.writeAll("zig_"); - try writer.writeAll(if (location == .StaticInitializer) "init" else "make"); - try writer.writeAll("_special_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - if (std.math.signbit(f128_val)) try writer.writeByte('-'); - try writer.writeAll(", "); - try writer.writeAll(operation); - try writer.writeAll(", "); - if (std.math.isNan(f128_val)) switch (bits) { - // We only actually need to pass the significand, but it will get - // properly masked anyway, so just pass the whole value. - 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), - 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), - else => unreachable, - }; - try writer.writeAll(", "); - empty = false; - } - try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)}); - if (!empty) try writer.writeByte(')'); - return; - }, - .Pointer => switch (val.ip_index) { - .null_value => if (ty.isSlice(mod)) { - var slice_pl = Value.Payload.Slice{ - .base = .{ .tag = .slice }, - .data = .{ .ptr = val, .len = Value.undef }, - }; - const slice_val = Value.initPayload(&slice_pl.base); - - return dg.renderValue(writer, ty, slice_val, location); - } else { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - try writer.writeAll(")NULL)"); - }, - .none => switch (val.tag()) { - .variable => { - const decl = val.castTag(.variable).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .slice => { - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const slice = val.castTag(.slice).?.data; - - try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(mod), slice.ptr, initializer_type); - try writer.writeAll(", "); - try dg.renderValue(writer, Type.usize, slice.len, initializer_type); - try writer.writeByte('}'); - }, - .function => { - const func = val.castTag(.function).?.data; - try dg.renderDeclName(writer, func.owner_decl, 0); - }, - .extern_fn => { - const extern_fn = val.castTag(.extern_fn).?.data; - try dg.renderDeclName(writer, extern_fn.owner_decl, 0); - }, - .lazy_align, .lazy_size => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); - }, - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); - }, - else => unreachable, - }, - }, + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { .Array, .Vector => { if (location == .FunctionArgument) { try writer.writeByte('('); @@ -1129,17 +894,6 @@ pub const DeclGen = struct { return; }, .none => switch (val.tag()) { - .empty_array => { - const ai = ty.arrayInfo(mod); - try writer.writeByte('{'); - if (ai.sentinel) |s| { - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } else { - try writer.writeByte('0'); - } - try 
writer.writeByte('}'); - return; - }, .bytes, .str_lit => |t| { const bytes = switch (t) { .bytes => val.castTag(.bytes).?.data, @@ -1210,91 +964,6 @@ pub const DeclGen = struct { try writer.writeByte('}'); } }, - .Bool => { - if (val.toBool(mod)) { - return writer.writeAll("true"); - } else { - return writer.writeAll("false"); - } - }, - .Optional => { - const payload_ty = ty.optionalChild(mod); - - const is_null_val = Value.makeBool(val.ip_index == .null_value); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) - return dg.renderValue(writer, Type.bool, is_null_val, location); - - if (ty.optionalReprIsPayload(mod)) { - const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val; - return dg.renderValue(writer, payload_ty, payload_val, location); - } - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else Value.undef; - - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); - try writer.writeAll(", .is_null = "); - try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); - try writer.writeAll(" }"); - }, - .ErrorSet => { - if (val.castTag(.@"error")) |error_pl| { - // Error values are already defined by genErrDecls. - try writer.print("zig_error_{}", .{fmtIdent(error_pl.data.name)}); - } else { - try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, .Other)}); - } - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - const error_ty = ty.errorUnionSet(mod); - const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; - - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return dg.renderValue(writer, error_ty, error_val, location); - } - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); - try writer.writeAll(", .error = "); - try dg.renderValue(writer, error_ty, error_val, initializer_type); - try writer.writeAll(" }"); - }, - .Enum => switch (val.ip_index) { - .none => { - const int_tag_ty = try ty.intTagType(mod); - return dg.renderValue(writer, int_tag_ty, val, location); - }, - else => { - const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; - const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); - return dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); - }, - }, - .Fn => switch (val.tag()) { - .function => { - const decl = val.castTag(.function).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .extern_fn => { - const decl = val.castTag(.extern_fn).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - else => unreachable, - }, .Struct => switch (ty.containerLayout(mod)) { .Auto, .Extern => { const field_vals = val.castTag(.aggregate).?.data; @@ -1408,7 +1077,448 @@ pub const DeclGen = struct { } }, }, - .Union => { + + .Frame, + .AnyFrame, + => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ + @tagName(tag), + }), + + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + 
.ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try writer.writeAll(@tagName(simple_value)), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), + .lazy_align, .lazy_size => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + }, + .err => |err| try writer.print("zig_error_{}", .{ + fmtIdent(mod.intern_pool.stringToSlice(err.name)), + }), + .error_union => |error_union| { + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); + const error_val = if (val.errorUnionIsPayload(mod)) try mod.intValue(Type.anyerror, 0) else val; + + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return dg.renderValue(writer, error_ty, error_val, location); + } + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + const payload_val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(); + + try writer.writeAll("{ .payload = "); + try dg.renderValue(writer, payload_ty, payload_val, initializer_type); + try writer.writeAll(", .error = "); + try dg.renderValue(writer, error_ty, error_val, initializer_type); + try writer.writeAll(" }"); + }, + .enum_tag => { + const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + try dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); + }, + .float => { + const bits = ty.floatBits(target); + const f128_val = val.toFloat(f128, mod); + + // All unsigned ints matching float types are pre-allocated. 
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; + + assert(bits <= 128); + var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; + var repr_val_big = BigInt.Mutable{ + .limbs = &repr_val_limbs, + .len = undefined, + .positive = undefined, + }; + + switch (bits) { + 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), + 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), + 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), + 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), + 128 => repr_val_big.set(@bitCast(u128, f128_val)), + else => unreachable, + } + + const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); + + try writer.writeAll("zig_cast_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte(' '); + var empty = true; + if (std.math.isFinite(f128_val)) { + try writer.writeAll("zig_make_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + switch (bits) { + 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), + 128 => try writer.print("{x}", .{f128_val}), + else => unreachable, + } + try writer.writeAll(", "); + empty = false; + } else { + // isSignalNan is equivalent to isNan currently, and MSVC doesn't have nans, so prefer nan + const operation = if (std.math.isNan(f128_val)) + "nan" + else if (std.math.isSignalNan(f128_val)) + "nans" + else if (std.math.isInf(f128_val)) + "inf" + else + unreachable; + + if (location == .StaticInitializer) { + if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val)) + return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{}); + + // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression + + // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly + // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128) + // return dg.fail("Only quiet nans are supported in global variable initializers", .{}); + } + + try writer.writeAll("zig_"); + try writer.writeAll(if (location == .StaticInitializer) "init" else "make"); + try writer.writeAll("_special_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + if (std.math.signbit(f128_val)) try writer.writeByte('-'); + try writer.writeAll(", "); + try writer.writeAll(operation); + try writer.writeAll(", "); + if (std.math.isNan(f128_val)) switch (bits) { + // We only actually need to pass the significand, but it will get + // properly masked anyway, so just pass the whole value.
+ 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), + 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), + 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), + 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), + 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), + else => unreachable, + }; + try writer.writeAll(", "); + empty = false; + } + try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)}); + if (!empty) try writer.writeByte(')'); + }, + .ptr => |ptr| { + if (ptr.len != .none) { + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + try writer.writeByte('{'); + } + switch (ptr.addr) { + .decl, .mut_decl => try dg.renderDeclValue( + writer, + ty, + val, + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }, + location, + ), + .int => |int| { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + try writer.print("){x})", .{ + try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), + }); + }, + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.renderParentPtr(writer, val.ip_index, location), + .comptime_field => unreachable, + } + if (ptr.len != .none) { + try writer.writeAll(", "); + try dg.renderValue(writer, Type.usize, ptr.len.toValue(), initializer_type); + try writer.writeByte('}'); + } + }, + .opt => |opt| { + const payload_ty = ty.optionalChild(mod); + + const is_null_val = Value.makeBool(opt.val == .none); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + return dg.renderValue(writer, Type.bool, is_null_val, location); + + if (ty.optionalReprIsPayload(mod)) { + return dg.renderValue(writer, payload_ty, switch (opt.val) { + .none => try mod.intValue(payload_ty, 0), + else => opt.val.toValue(), + }, location); + } + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + try writer.writeAll("{ .payload = "); + try dg.renderValue(writer, payload_ty, switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + else => opt.val, + }.toValue(), initializer_type); + try writer.writeAll(", .is_null = "); + try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); + try writer.writeAll(" }"); + }, + .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type, .vector_type => { + if (location == .FunctionArgument) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + // Fall back to generic implementation. 
+ + // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal + const max_string_initializer_len = 65535; + + const ai = ty.arrayInfo(mod); + if (ai.elem_type.eql(Type.u8, mod)) { + if (ai.len <= max_string_initializer_len) { + var literal = stringLiteral(writer); + try literal.start(); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + const elem_val = try val.elemValue(mod, index); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try literal.writeChar(elem_val_u8); + } + if (ai.sentinel) |s| { + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); + if (s_u8 != 0) try literal.writeChar(s_u8); + } + try literal.end(); + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(mod, index); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try writer.print("'\\x{x}'", .{elem_val_u8}); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(mod, index); + try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } + }, + .struct_type, .anon_struct_type => switch (ty.containerLayout(mod)) { + .Auto, .Extern => { + const field_vals = val.castTag(.aggregate).?.data; + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + try writer.writeByte('{'); + var empty = true; + for (field_vals, 0..) |field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeByte(','); + try dg.renderValue(writer, field_ty, field_val, initializer_type); + + empty = false; + } + try writer.writeByte('}'); + }, + .Packed => { + const field_vals = val.castTag(.aggregate).?.data; + const int_info = ty.intInfo(mod); + + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); + + var bit_offset: u64 = 0; + + var eff_num_fields: usize = 0; + for (0..field_vals.len) |field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + eff_num_fields += 1; + } + + if (eff_num_fields == 0) { + try writer.writeByte('('); + try dg.renderValue(writer, ty, Value.undef, initializer_type); + try writer.writeByte(')'); + } else if (ty.bitSize(mod) > 64) { + // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) + var num_or = eff_num_fields - 1; + while (num_or > 0) : (num_or -= 1) { + try writer.writeAll("zig_or_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + } + + var eff_index: usize = 0; + var needs_closing_paren = false; + for (field_vals, 0..) 
|field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const cast_context = IntCastContext{ .value = .{ .value = field_val } }; + if (bit_offset != 0) { + try writer.writeAll("zig_shl_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); + try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } else { + try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); + } + + if (needs_closing_paren) try writer.writeByte(')'); + if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + + bit_offset += field_ty.bitSize(mod); + needs_closing_paren = true; + eff_index += 1; + } + } else { + try writer.writeByte('('); + // a << a_off | b << b_off | c << c_off + var empty = true; + for (field_vals, 0..) |field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeAll(" | "); + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + + if (bit_offset != 0) { + try dg.renderValue(writer, field_ty, field_val, .Other); + try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + } else { + try dg.renderValue(writer, field_ty, field_val, .Other); + } + + bit_offset += field_ty.bitSize(mod); + empty = false; + } + try writer.writeByte(')'); + } + }, + }, + else => unreachable, + }, + .un => { const union_obj = val.castTag(.@"union").?.data; if (!location.isInitializer()) { @@ -1461,22 +1571,6 @@ pub const DeclGen = struct { if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}'); try writer.writeByte('}'); }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ - @tagName(tag), - }), } } @@ -1504,8 +1598,7 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.val.castTag(.function)) |func_payload| - if (func_payload.data.is_cold) try w.writeAll("zig_cold "); + if (fn_decl.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( @@ -1747,18 +1840,12 @@ pub const DeclGen = struct { fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { const mod = dg.module; - switch (tv.val.tag()) { - .extern_fn => return true, - .function => { - const func = tv.val.castTag(.function).?.data; - return mod.decl_exports.contains(func.owner_decl); - }, - .variable => { - const variable = tv.val.castTag(.variable).?.data; - return mod.decl_exports.contains(variable.owner_decl); - }, + return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .variable => |variable| mod.decl_exports.contains(variable.decl), + .extern_func => true, + .func => 
|func| mod.decl_exports.contains(mod.funcPtr(func.index).owner_decl), else => unreachable, - } + }; } fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void { @@ -1833,7 +1920,7 @@ pub const DeclGen = struct { try dg.writeCValue(writer, member); } - fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: *Module.Var) !void { + fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: InternPool.Key.Variable) !void { const decl = dg.module.declPtr(decl_index); const fwd_decl_writer = dg.fwd_decl.writer(); const is_global = dg.declIsGlobal(.{ .ty = decl.ty, .val = decl.val }) or variable.is_extern; @@ -1844,7 +1931,7 @@ pub const DeclGen = struct { fwd_decl_writer, decl.ty, .{ .decl = decl_index }, - CQualifiers.init(.{ .@"const" = !variable.is_mutable }), + CQualifiers.init(.{ .@"const" = variable.is_const }), decl.@"align", .complete, ); @@ -1858,7 +1945,7 @@ pub const DeclGen = struct { if (mod.decl_exports.get(decl_index)) |exports| { try writer.writeAll(exports.items[export_index].options.name); - } else if (decl.isExtern()) { + } else if (decl.isExtern(mod)) { try writer.writeAll(mem.span(decl.name)); } else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), @@ -2416,8 +2503,11 @@ pub fn genErrDecls(o: *Object) !void { var max_name_len: usize = 0; for (mod.error_name_list.items, 0..) |name, value| { max_name_len = std.math.max(name.len, max_name_len); - var err_pl = Value.Payload.Error{ .data = .{ .name = name } }; - try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other); + const err_val = try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = mod.intern_pool.getString(name).unwrap().?, + } }); + try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other); try writer.print(" = {d}u,\n", .{value}); } o.indent_writer.popIndent(); @@ -2451,7 +2541,7 @@ pub fn genErrDecls(o: *Object) !void { const name_array_ty = try mod.arrayType(.{ .len = mod.error_name_list.items.len, - .child = .const_slice_u8_sentinel_0_type, + .child = .slice_const_u8_sentinel_0_type, .sentinel = .zero_u8, }); @@ -2497,7 +2587,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { .tag_name => { const enum_ty = val.data.tag_name; - const name_slice_ty = Type.const_slice_u8_sentinel_0; + const name_slice_ty = Type.slice_const_u8_sentinel_0; try w.writeAll("static "); try o.dg.renderType(w, name_slice_ty); @@ -2668,14 +2758,13 @@ pub fn genDecl(o: *Object) !void { const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; - if (tv.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 }); try fwd_decl_writer.writeAll(";\n"); try genExports(o); - } else if (tv.val.castTag(.variable)) |var_payload| { - const variable: *Module.Var = var_payload.data; + } else if (decl.getVariable(mod)) |variable| { try o.dg.renderFwdDecl(decl_c_value.decl, variable); try genExports(o); @@ -2690,7 +2779,7 @@ pub fn genDecl(o: *Object) !void { try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete); if (decl.@"linksection" != null) try w.writeAll(", read, write)"); try w.writeAll(" = "); - try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); + try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer); 
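+            // (`variable.init` is an InternPool index now; `toValue()` wraps it for rendering.)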
try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { @@ -4157,10 +4246,13 @@ fn airCall( known: { const fn_decl = fn_decl: { const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; - break :fn_decl switch (callee_val.tag()) { - .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, - .function => callee_val.castTag(.function).?.data.owner_decl, - .decl_ref => callee_val.castTag(.decl_ref).?.data, + break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => break :known, + }, else => break :known, }; }; @@ -4231,9 +4323,9 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; - const writer = f.object.writer(); - const function = f.air.values[ty_pl.payload].castTag(.function).?.data; const mod = f.object.dg.module; + const writer = f.object.writer(); + const function = f.air.values[ty_pl.payload].getFunction(mod).?; try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); return .none; } @@ -6634,9 +6726,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, accum, .Other); try writer.writeAll(" = "); - var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa); - defer arena.deinit(); - try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), .And => switch (scalar_ty.zigTypeTag(mod)) { @@ -6654,7 +6743,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { }, .Max => switch (scalar_ty.zigTypeTag(mod)) { .Bool => try mod.intValue(scalar_ty, 0), - .Int => try scalar_ty.minInt(arena.allocator(), mod), + .Int => try scalar_ty.minInt(mod), .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8dec958806a3..f8ddddad1c9f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -582,7 +582,7 @@ pub const Object = struct { llvm_usize_ty, }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; @@ -866,10 +866,11 @@ pub const Object = struct { pub fn updateFunc( o: *Object, mod: *Module, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, ) !void { + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const target = mod.getTarget(); @@ -886,7 +887,7 @@ pub const Object = struct { const llvm_func = try dg.resolveLlvmFunction(decl_index); - if (mod.align_stack_fns.get(func)) |align_info| { + if (mod.align_stack_fns.get(func_index)) |align_info| { dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment); dg.addFnAttr(llvm_func, "noinline"); } else { @@ -1164,7 +1165,7 @@ pub const Object = struct { di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; - const is_internal_linkage = decl.val.tag() != .extern_fn and + const is_internal_linkage = decl.getExternFunc(mod) == null and 
!mod.decl_exports.contains(decl_index); const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type) llvm.DIFlags.NoReturn @@ -1269,18 +1270,20 @@ pub const Object = struct { // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. const llvm_global = self.decl_map.get(decl_index) orelse return; const decl = mod.declPtr(decl_index); - if (decl.isExtern()) { - const is_wasm_fn = mod.getTarget().isWasm() and try decl.isFunction(mod); - const mangle_name = is_wasm_fn and - decl.getExternFn().?.lib_name != null and - !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c"); - const decl_name = if (mangle_name) name: { - const tmp = try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ - decl.name, decl.getExternFn().?.lib_name.?, - }); - break :name tmp.ptr; - } else decl.name; - defer if (mangle_name) gpa.free(std.mem.sliceTo(decl_name, 0)); + if (decl.isExtern(mod)) { + var free_decl_name = false; + const decl_name = decl_name: { + if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { + if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + free_decl_name = true; + break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ decl.name, lib_name }); + } + } + } + break :decl_name std.mem.span(decl.name); + }; + defer if (free_decl_name) gpa.free(decl_name); llvm_global.setValueName(decl_name); if (self.getLlvmGlobal(decl_name)) |other_global| { @@ -1303,13 +1306,13 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - if (decl.val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.data.is_weak_linkage) { + if (variable.is_weak_linkage) { llvm_global.setLinkage(.ExternalWeak); } } @@ -1345,8 +1348,8 @@ pub const Object = struct { defer gpa.free(section_z); llvm_global.setSection(section_z); } - if (decl.val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } } @@ -1379,9 +1382,9 @@ pub const Object = struct { llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); llvm_global.setUnnamedAddr(.True); - if (decl.val.castTag(.variable)) |variable| { + if (decl.getVariable(mod)) |variable| { const single_threaded = mod.comp.bin_file.options.single_threaded; - if (variable.data.is_threadlocal and !single_threaded) { + if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); @@ -1510,12 +1513,11 @@ pub const Object = struct { for (enum_type.names, 0..) 
|field_name_ip, i| { const field_name_z = ip.stringToSlice(field_name_ip); - var bigint_space: InternPool.Key.Int.Storage.BigIntSpace = undefined; - const storage = if (enum_type.values.len != 0) - ip.indexToKey(enum_type.values[i]).int.storage + var bigint_space: Value.BigIntSpace = undefined; + const bigint = if (enum_type.values.len != 0) + enum_type.values[i].toValue().toBigInt(&bigint_space, mod) else - InternPool.Key.Int.Storage{ .u64 = i }; - const bigint = storage.toBigInt(&bigint_space); + std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst(); if (bigint.limbs.len == 1) { enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned); @@ -2442,6 +2444,7 @@ pub const DeclGen = struct { } fn genDecl(dg: *DeclGen) !void { + const mod = dg.module; const decl = dg.decl; const decl_index = dg.decl_index; assert(decl.has_tv); @@ -2449,19 +2452,16 @@ pub const DeclGen = struct { log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(), }); - assert(decl.val.ip_index != .none or decl.val.tag() != .function); - if (decl.val.castTag(.extern_fn)) |extern_fn| { - _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); + if (decl.getExternFunc(mod)) |extern_func| { + _ = try dg.resolveLlvmFunction(extern_func.decl); } else { - const mod = dg.module; const target = mod.getTarget(); var global = try dg.resolveGlobalDecl(decl_index); global.setAlignment(decl.getAlignment(mod)); if (decl.@"linksection") |section| global.setSection(section); assert(decl.has_tv); - const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { - const variable = payload.data; - break :init_val variable.init; + const init_val = if (decl.getVariable(mod)) |variable| init_val: { + break :init_val variable.init.toValue(); } else init_val: { global.setGlobalConstant(.True); break :init_val decl.val; @@ -2519,7 +2519,7 @@ pub const DeclGen = struct { ); try dg.object.di_map.put(dg.gpa, dg.decl, di_global.getVariable().toNode()); - if (!is_internal_linkage or decl.isExtern()) global.attachMetaData(di_global); + if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global); } } } @@ -2548,17 +2548,16 @@ pub const DeclGen = struct { const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); gop.value_ptr.* = llvm_fn; - const is_extern = decl.isExtern(); + const is_extern = decl.isExtern(mod); if (!is_extern) { llvm_fn.setLinkage(.Internal); llvm_fn.setUnnamedAddr(.True); } else { if (target.isWasm()) { dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); - if (decl.getExternFn().?.lib_name) |lib_name| { - const module_name = std.mem.sliceTo(lib_name, 0); - if (!std.mem.eql(u8, module_name, "c")) { - dg.addFnAttrString(llvm_fn, "wasm-import-module", module_name); + if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); } } } @@ -2695,11 +2694,12 @@ pub const DeclGen = struct { if (gop.found_existing) return gop.value_ptr.*; errdefer assert(dg.object.decl_map.remove(decl_index)); - const decl = dg.module.declPtr(decl_index); - const fqn = try decl.getFullyQualifiedName(dg.module); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + const fqn = try decl.getFullyQualifiedName(mod); defer dg.gpa.free(fqn); - const target = dg.module.getTarget(); + const target = mod.getTarget(); const llvm_type = try 
dg.lowerType(decl.ty); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); @@ -2712,18 +2712,18 @@ pub const DeclGen = struct { gop.value_ptr.* = llvm_global; // This is needed for declarations created by `@extern`. - if (decl.isExtern()) { + if (decl.isExtern(mod)) { llvm_global.setValueName(decl.name); llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); - if (decl.val.castTag(.variable)) |variable| { - const single_threaded = dg.module.comp.bin_file.options.single_threaded; - if (variable.data.is_threadlocal and !single_threaded) { + if (decl.getVariable(mod)) |variable| { + const single_threaded = mod.comp.bin_file.options.single_threaded; + if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); + if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); } } else { llvm_global.setLinkage(.Internal); @@ -3199,468 +3199,344 @@ pub const DeclGen = struct { const mod = dg.module; const target = mod.getTarget(); var tv = arg_tv; - if (tv.val.castTag(.runtime_value)) |rt| { - tv.val = rt.data; + switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .runtime_value => |rt| tv.val = rt.val.toValue(), + else => {}, } - if (tv.val.isUndef(mod)) { + if (tv.val.isUndefDeep(mod)) { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } - switch (tv.ty.zigTypeTag(mod)) { - .Bool => { - const llvm_type = try dg.lowerType(tv.ty); - return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); - }, - .Int => switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - else => { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, mod); - return lowerBigInt(dg, tv.ty, bigint); - }, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int.storage.toBigInt(&bigint_space); - return lowerBigInt(dg, tv.ty, bigint); - }, - else => unreachable, - }, - }, - .Enum => { - const int_val = try tv.enumToInt(mod); - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_space, mod); - - const int_info = tv.ty.intInfo(mod); - const llvm_type = dg.context.intType(int_info.bits); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; - }, - .Float => { - const llvm_ty = try dg.lowerType(tv.ty); - switch (tv.ty.floatBits(target)) { - 16 => { - const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); - const llvm_i16 = dg.context.intType(16); - const int = llvm_i16.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 32 => { - const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); - const llvm_i32 = dg.context.intType(32); - const int = llvm_i32.constInt(repr, .False); - 
return int.constBitCast(llvm_ty); - }, - 64 => { - const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); - const llvm_i64 = dg.context.intType(64); - const int = llvm_i64.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 80 => { - const float = tv.val.toFloat(f80, mod); - const repr = std.math.break_f80(float); - const llvm_i80 = dg.context.intType(80); - var x = llvm_i80.constInt(repr.exp, .False); - x = x.constShl(llvm_i80.constInt(64, .False)); - x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); - if (backendSupportsF80(target)) { - return x.constBitCast(llvm_ty); - } else { - return x; - } - }, - 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); - // LLVM seems to require that the lower half of the f128 be placed first - // in the buffer. - if (native_endian == .Big) { - std.mem.swap(u64, &buf[0], &buf[1]); - } - const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); - return int.constBitCast(llvm_ty); - }, - else => unreachable, - } - }, - .Pointer => switch (tv.val.ip_index) { - .null_value => { - const llvm_type = try dg.lowerType(tv.ty); - return llvm_type.constNull(); - }, - .none => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - .variable => { - const decl_index = tv.val.castTag(.variable).?.data.owner_decl; - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const val = try dg.resolveGlobalDecl(decl_index); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - return addrspace_casted_ptr; - }, - .slice => { - const slice = tv.val.castTag(.slice).?.data; - const fields: [2]*llvm.Value = .{ - try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(mod), - .val = slice.ptr, - }), - try dg.lowerValue(.{ - .ty = Type.usize, - .val = slice.len, - }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - .lazy_align, .lazy_size => { - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); - return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); - }, - .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); - }, - .opt_payload => { - const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); - }, - else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ - tv.ty.fmtDebug(), tag, - }), - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| return dg.lowerIntAsPtr(int), - .ptr => |ptr| { - const ptr_tv: TypedValue = switch (ptr.len) { - .none => tv, - else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, - }; - const llvm_ptr_val = switch (ptr.addr) { - .@"var" => |@"var"| ptr: { - const decl = dg.module.declPtr(@"var".owner_decl); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const 
val = try dg.resolveGlobalDecl(@"var".owner_decl); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - break :ptr addrspace_casted_ptr; - }, - .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), - .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), - .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), - .eu_payload, - .opt_payload, - .elem, - .field, - => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0), - .comptime_field => unreachable, - }; - switch (ptr.len) { - .none => return llvm_ptr_val, - else => { - const fields: [2]*llvm.Value = .{ - llvm_ptr_val, - try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - } - }, - else => unreachable, + if (tv.val.ip_index == .none) switch (tv.ty.zigTypeTag(mod)) { + .Array => switch (tv.val.tag()) { + .bytes => { + const bytes = tv.val.castTag(.bytes).?.data; + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + .True, // Don't null terminate. Bytes has the sentinel, if any. + ); }, - }, - .Array => switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ); - }, - .str_lit => { - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - if (byte == 0 and bytes.len > 0) { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .False, // Yes, null terminate. - ); - } - var array = std.ArrayList(u8).init(dg.gpa); - defer array.deinit(); - try array.ensureUnusedCapacity(bytes.len + 1); - array.appendSliceAssumeCapacity(bytes); - array.appendAssumeCapacity(byte); - return dg.context.constString( - array.items.ptr, - @intCast(c_uint, array.items.len), - .True, // Don't null terminate. - ); - } else { + .str_lit => { + const str_lit = tv.val.castTag(.str_lit).?.data; + const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + if (tv.ty.sentinel(mod)) |sent_val| { + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); + if (byte == 0 and bytes.len > 0) { return dg.context.constString( bytes.ptr, @intCast(c_uint, bytes.len), - .True, // Don't null terminate. `bytes` has the sentinel, if any. - ); - } - }, - .aggregate => { - const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.childType(mod); - const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod)); - const llvm_elems = try gpa.alloc(*llvm.Value, len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals[0..len], 0..) 
|elem_val, i| { - llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + .False, // Yes, null terminate. ); } - }, - .repeated => { - const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); + var array = std.ArrayList(u8).init(dg.gpa); + defer array.deinit(); + try array.ensureUnusedCapacity(bytes.len + 1); + array.appendSliceAssumeCapacity(bytes); + array.appendAssumeCapacity(byte); + return dg.context.constString( + array.items.ptr, + @intCast(c_uint, array.items.len), + .True, // Don't null terminate. + ); + } else { + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, bytes.len), + .True, // Don't null terminate. `bytes` has the sentinel, if any. + ); + } + }, + else => unreachable, + }, + .Struct => { + const llvm_struct_ty = try dg.lowerType(tv.ty); + const gpa = dg.gpa; + + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); + + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. 
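+                            // Until then, the padding is emitted as an undef [padding_len x i8] element.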
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } - if (sentinel) |sent| { - llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = try tv.val.fieldValue(mod, i), + }); + + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } if (need_unnamed) { return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, ); } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), ); } }, - .empty_array_sentinel => { - const elem_ty = tv.ty.childType(mod); - const sent_val = tv.ty.sentinel(mod).?; - const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); - const llvm_elems: [1]*llvm.Value = .{sentinel}; - const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); - if (need_unnamed) { - return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); - } - }, + .struct_type => |struct_type| struct_type, else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elem_vals| { - const elem_ty = tv.ty.childType(mod); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals, 0..) 
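+                        // e.g. packing .{ a: u4 = 0x5, b: u4 = 0xA } into a u8 backing
+                        // integer folds to (a << 0) | (b << 4) == 0xA5.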
|elem_val, i| { - llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - .repeated_elem => |val| { - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); + }; - var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); - } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - if (sentinel) |sent| { - llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); - } + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); + const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); + const fields = struct_obj.fields.values(); + comptime assert(Type.packed_struct_layout_version == 2); + var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_bits: u16 = 0; + for (fields, 0..) |field, i| { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - }, - else => unreachable, - }, - }, - .Optional => { - comptime assert(optional_layout_version == 3); - const payload_ty = tv.ty.optionalChild(mod); + const non_int_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, i), + }); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const small_int_ty = dg.context.intType(ty_bit_size); + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) + non_int_val.constPtrToInt(small_int_ty) + else + non_int_val.constBitCast(small_int_ty); + const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + // If the field is as large as the entire packed struct, this + // zext would go from, e.g. i16 to i16. This is legal with + // constZExtOrBitCast but not legal with constZExt. 
+ const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const shifted = extended_int_val.constShl(shift_rhs); + running_int = running_int.constOr(shifted); + running_bits += ty_bit_size; + } + return running_int; + } - const llvm_i8 = dg.context.intType(8); - const is_pl = !tv.val.isNull(mod); - const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return non_null_bit; + const llvm_field_count = llvm_struct_ty.countStructElementTypes(); + var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); + defer llvm_fields.deinit(gpa); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; + + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + + const field_llvm_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, field_and_index.index), + }); + + need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field.ty.abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } - const llvm_ty = try dg.lowerType(tv.ty); - if (tv.ty.optionalReprIsPayload(mod)) return switch (tv.val.ip_index) { - .none => if (tv.val.castTag(.opt_payload)) |payload| - try dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }) - else if (is_pl) - try dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }) - else - llvm_ty.constNull(), - .null_value => llvm_ty.constNull(), - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .opt => |opt| switch (opt.val) { - .none => llvm_ty.constNull(), - else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), - }, - else => unreachable, - }, - }; - assert(payload_ty.zigTypeTag(mod) != .Fn); - const llvm_field_count = llvm_ty.countStructElementTypes(); - var fields_buf: [3]*llvm.Value = undefined; - fields_buf[0] = try dg.lowerValue(.{ - .ty = payload_ty, - .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.undef, - }); - fields_buf[1] = non_null_bit; - if (llvm_field_count > 2) { - assert(llvm_field_count == 3); - fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); } - return dg.context.constStruct(&fields_buf, llvm_field_count, .False); }, - .Fn => { - const fn_decl_index = switch 
(tv.val.tag()) { - .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl, - .function => tv.val.castTag(.function).?.data.owner_decl, - else => unreachable, - }; - const fn_decl = dg.module.declPtr(fn_decl_index); - dg.module.markDeclAlive(fn_decl); - return dg.resolveLlvmFunction(fn_decl_index); + .Vector => switch (tv.val.tag()) { + .bytes => { + // Note, sentinel is not stored even if the type has a sentinel. + const bytes = tv.val.castTag(.bytes).?.data; + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); + assert(vector_len == bytes.len or vector_len + 1 == bytes.len); + + const elem_ty = tv.ty.childType(mod); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) |*elem, i| { + elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = try mod.intValue(elem_ty, bytes[i]), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + .str_lit => { + // Note, sentinel is not stored + const str_lit = tv.val.castTag(.str_lit).?.data; + const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); + assert(vector_len == bytes.len); + + const elem_ty = tv.ty.childType(mod); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) |*elem, i| { + elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = try mod.intValue(elem_ty, bytes[i]), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + else => unreachable, }, - .ErrorSet => { + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Frame, + .AnyFrame, + => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => { + const llvm_type = try dg.lowerType(tv.ty); + return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); + }, + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => |int| { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int.storage.toBigInt(&bigint_space); + return lowerBigInt(dg, tv.ty, bigint); + }, + .err => |err| { const llvm_ty = try dg.lowerType(Type.anyerror); - switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .@"error" => { - const err_name = tv.val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - return llvm_ty.constInt(kv.value, .False); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. 
- return llvm_ty.constNull(); - }, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| return llvm_ty.constInt(int.storage.u64, .False), - else => unreachable, - }, - } + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + return llvm_ty.constInt(kv.value, .False); }, - .ErrorUnion => { + .error_union => |error_union| { const payload_type = tv.ty.errorUnionPayload(mod); - const is_pl = tv.val.errorUnionIsPayload(); + const is_pl = tv.val.errorUnionIsPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. @@ -3676,7 +3552,10 @@ pub const DeclGen = struct { }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.undef, + .val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_type.ip_index }), + .payload => |payload| payload, + }.toValue(), }); var fields_buf: [3]*llvm.Value = undefined; @@ -3697,172 +3576,396 @@ pub const DeclGen = struct { return dg.context.constStruct(&fields_buf, llvm_field_count, .False); } }, - .Struct => { - const llvm_struct_ty = try dg.lowerType(tv.ty); - const gpa = dg.gpa; + .enum_tag => { + const int_val = try tv.enumToInt(mod); - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { - .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int_val.toBigInt(&bigint_space, mod); - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + const int_info = tv.ty.intInfo(mod); + const llvm_type = dg.context.intType(int_info.bits); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); + }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); + } + return unsigned_val; + }, + .float => { + const llvm_ty = try dg.lowerType(tv.ty); + switch (tv.ty.floatBits(target)) { + 16 => { + const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); + const llvm_i16 = dg.context.intType(16); + const int = llvm_i16.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 32 => { + const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); + const llvm_i32 = dg.context.intType(32); + const int = llvm_i32.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 64 => { + const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); + const llvm_i64 = dg.context.intType(64); + const int = llvm_i64.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 80 => { + const float = tv.val.toFloat(f80, mod); + const repr = std.math.break_f80(float); + const llvm_i80 = dg.context.intType(80); + var x = llvm_i80.constInt(repr.exp, .False); + x = x.constShl(llvm_i80.constInt(64, .False)); + x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); + if (backendSupportsF80(target)) { + return x.constBitCast(llvm_ty); + } else { + return x; + } + }, + 128 => { + var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); + // LLVM seems to require that the 
lower half of the f128 be placed first + // in the buffer. + if (native_endian == .Big) { + std.mem.swap(u64, &buf[0], &buf[1]); + } + const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); + return int.constBitCast(llvm_ty); + }, + else => unreachable, + } + }, + .ptr => |ptr| { + const ptr_tv: TypedValue = switch (ptr.len) { + .none => tv, + else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, + }; + const llvm_ptr_val = switch (ptr.addr) { + .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), + .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), + .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0), + .comptime_field => unreachable, + }; + switch (ptr.len) { + .none => return llvm_ptr_val, + else => { + const fields: [2]*llvm.Value = .{ + llvm_ptr_val, + try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + } + }, + .opt => |opt| { + comptime assert(optional_layout_version == 3); + const payload_ty = tv.ty.optionalChild(mod); - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { - if (field_val != .none) continue; - if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + const llvm_i8 = dg.context.intType(8); + const non_null_bit = switch (opt.val) { + .none => llvm_i8.constNull(), + else => llvm_i8.constInt(1, .False), + }; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return non_null_bit; + } + const llvm_ty = try dg.lowerType(tv.ty); + if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { + .none => llvm_ty.constNull(), + else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), + }; + assert(payload_ty.zigTypeTag(mod) != .Fn); - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const llvm_field_count = llvm_ty.countStructElementTypes(); + var fields_buf: [3]*llvm.Value = undefined; + fields_buf[0] = try dg.lowerValue(.{ + .ty = payload_ty, + .val = switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + else => |payload| payload, + }.toValue(), + }); + fields_buf[1] = non_null_bit; + if (llvm_field_count > 2) { + assert(llvm_field_count == 3); + fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + } + return dg.context.constStruct(&fields_buf, llvm_field_count, .False); + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .array_type => switch (aggregate.storage) { + .bytes => |bytes| return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + .True, // Don't null terminate. Bytes has the sentinel, if any. + ), + .elems => |elem_vals| { + const elem_ty = tv.ty.childType(mod); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); + defer gpa.free(llvm_elems); + var need_unnamed = false; + for (elem_vals, 0..) 
|elem_val, i| { + llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); + } + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .repeated_elem => |val| { + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); + const len_including_sent = len + @boolToInt(sentinel != null); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); + defer gpa.free(llvm_elems); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + var need_unnamed = false; + if (len != 0) { + for (llvm_elems[0..len]) |*elem| { + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); } + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); + } - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(field_ty.toType(), mod, i), - }); + if (sentinel) |sent| { + llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + } - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + }, + .vector_type => |vector_type| { + const elem_ty = vector_type.child.toType(); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) 
|*llvm_elem, i| { + llvm_elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }.toValue(), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + .struct_type, .anon_struct_type => { + const llvm_struct_ty = try dg.lowerType(tv.ty); + const gpa = dg.gpa; - llvm_fields.appendAssumeCapacity(field_llvm_val); + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = try tv.val.fieldValue(mod, i), + }); - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); - const fields = struct_obj.fields.values(); - comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); - var running_bits: u16 = 0; - for (fields, 0..) 
|field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); - const non_int_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(field.ty, mod, i), - }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); - const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); - running_bits += ty_bit_size; - } - return running_int; - } + llvm_fields.appendAssumeCapacity(field_llvm_val); - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + } - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + .struct_type => |struct_type| struct_type, + else => unreachable, + }; - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(mod, struct_obj.layout); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); + const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); + const fields = struct_obj.fields.values(); + comptime assert(Type.packed_struct_layout_version == 2); + var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_bits: u16 = 0; + for (fields, 0..) 
|field, i| { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const non_int_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, i), + }); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const small_int_ty = dg.context.intType(ty_bit_size); + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) + non_int_val.constPtrToInt(small_int_ty) + else + non_int_val.constBitCast(small_int_ty); + const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + // If the field is as large as the entire packed struct, this + // zext would go from, e.g. i16 to i16. This is legal with + // constZExtOrBitCast but not legal with constZExt. + const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const shifted = extended_int_val.constShl(shift_rhs); + running_int = running_int.constOr(shifted); + running_bits += ty_bit_size; + } + return running_int; } - const field_llvm_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(field.ty, mod, field_and_index.index), - }); + const llvm_field_count = llvm_struct_ty.countStructElementTypes(); + var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); + defer llvm_fields.deinit(gpa); - need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; + + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. 
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - llvm_fields.appendAssumeCapacity(field_llvm_val); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, field_and_index.index), + }); - offset += field.ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field.ty.abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + else => unreachable, }, - .Union => { + .un => { const llvm_union_ty = try dg.lowerType(tv.ty); const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) { .none => tv.val.castTag(.@"union").?.data, @@ -3950,96 +4053,6 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields_len); } }, - .Vector => switch (tv.val.tag()) { - .bytes => { - // Note, sentinel is not stored even if the type has a sentinel. - const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .aggregate => { - // Note, sentinel is not stored even if the type has a sentinel. - // The value includes the sentinel in those cases. - const elem_vals = tv.val.castTag(.aggregate).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len); - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .repeated => { - // Note, sentinel is not stored even if the type has a sentinel. 
- const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.childType(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .str_lit => { - // Note, sentinel is not stored - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - else => unreachable, - }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), } } @@ -4094,10 +4107,9 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); - if (ptr_val.ip_index != .none) return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { .int => |int| dg.lowerIntAsPtr(int), .ptr => |ptr| switch (ptr.addr) { - .@"var" => |@"var"| dg.lowerParentPtrDecl(ptr_val, @"var".owner_decl), .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), @@ -4150,7 +4162,7 @@ pub const DeclGen = struct { const indices: [1]*llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; - const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().childType(mod)); + const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().elemType2(mod)); return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .field => |field_ptr| { @@ -4185,7 +4197,7 @@ pub const DeclGen = struct { .Struct => { if (parent_ty.containerLayout(mod) == .Packed) { if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize = dg.context.intType(target.ptrBitWidth()); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); // count bits of fields before this one const prev_bits = b: { @@ -4230,148 +4242,6 @@ pub const DeclGen = struct { }, else => unreachable, }; - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .decl_ref => { - const decl = ptr_val.castTag(.decl_ref).?.data; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .variable => { - const decl = ptr_val.castTag(.variable).?.data.owner_decl; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_llvm_ptr = try 
dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned); - const parent_ty = field_ptr.container_ty; - - const field_index = @intCast(u32, field_ptr.field_index); - const llvm_u32 = dg.context.intType(32); - switch (parent_ty.zigTypeTag(mod)) { - .Union => { - if (parent_ty.containerLayout(mod) == .Packed) { - return parent_llvm_ptr; - } - - const layout = parent_ty.unionGetLayout(mod); - if (layout.payload_size == 0) { - // In this case a pointer to the union and a pointer to any - // (void) payload is the same. - return parent_llvm_ptr; - } - const llvm_pl_index = if (layout.tag_size == 0) - 0 - else - @boolToInt(layout.tag_align >= layout.payload_align); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_pl_index, .False), - }; - const parent_llvm_ty = try dg.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .Struct => { - if (parent_ty.containerLayout(mod) == .Packed) { - if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = dg.context.intType(target.ptrBitWidth()); - const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); - // count bits of fields before this one - const prev_bits = b: { - var b: usize = 0; - for (parent_ty.structFields(mod).values()[0..field_index]) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - b += @intCast(usize, field.ty.bitSize(mod)); - } - break :b b; - }; - const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); - const field_addr = base_addr.constAdd(byte_offset); - const final_llvm_ty = dg.context.pointerType(0); - return field_addr.constIntToPtr(final_llvm_ty); - } - - const parent_llvm_ty = try dg.lowerType(parent_ty); - if (llvmField(parent_ty, field_index, mod)) |llvm_field| { - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field.index, .False), - }; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } else { - const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); - const indices: [1]*llvm.Value = .{llvm_index}; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } - }, - .Pointer => { - assert(parent_ty.isSlice(mod)); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(field_index, .False), - }; - const parent_llvm_ty = try dg.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - else => unreachable, - } - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, true); - - const llvm_usize = try dg.lowerType(Type.usize); - const indices: [1]*llvm.Value = .{ - llvm_usize.constInt(elem_ptr.index, .False), - }; - const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty); - return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .opt_payload_ptr => { - const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true); - - const payload_ty = opt_payload_ptr.container_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or - payload_ty.optionalReprIsPayload(mod)) - { - // In this case, we represent pointer to optional the same as pointer - // to the payload. 
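
The prev_bits loop in the packed-struct case above amounts to summing the bit widths of the runtime fields that precede the target field, then converting to bytes. A small sketch with invented widths standing in for field.ty.bitSize(mod):

    const std = @import("std");

    // A packed field's byte offset is the bit widths of all earlier runtime
    // fields, summed and divided by 8 (mirroring the prev_bits / 8 step above).
    fn packedFieldByteOffset(bit_sizes: []const u16, field_index: usize) u64 {
        var prev_bits: u64 = 0;
        for (bit_sizes[0..field_index]) |bits| prev_bits += bits;
        return prev_bits / 8;
    }

    test "third field of a packed struct { u8, u16, u32 }" {
        try std.testing.expectEqual(@as(u64, 3), packedFieldByteOffset(&.{ 8, 16, 32 }, 2));
    }
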
- return parent_llvm_ptr; - } - - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(0, .False), - }; - const opt_llvm_ty = try dg.lowerType(opt_payload_ptr.container_ty); - return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .eu_payload_ptr => { - const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true); - - const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - // In this case, we represent pointer to error union the same as pointer - // to the payload. - return parent_llvm_ptr; - } - - const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(payload_offset, .False), - }; - const eu_llvm_ty = try dg.lowerType(eu_payload_ptr.container_ty); - return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - else => unreachable, - } } fn lowerDeclRefValue( @@ -4380,20 +4250,6 @@ pub const DeclGen = struct { decl_index: Module.Decl.Index, ) Error!*llvm.Value { const mod = self.module; - if (tv.ty.isSlice(mod)) { - const ptr_ty = tv.ty.slicePtrFieldType(mod); - const fields: [2]*llvm.Value = .{ - try self.lowerValue(.{ - .ty = ptr_ty, - .val = tv.val, - }), - try self.lowerValue(.{ - .ty = Type.usize, - .val = try mod.intValue(Type.usize, tv.val.sliceLen(mod)), - }), - }; - return self.context.constStruct(&fields, fields.len, .False); - } // In the case of something like: // fn foo() void {} @@ -4401,13 +4257,13 @@ pub const DeclGen = struct { // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. 
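
The alias check that follows can be modeled abstractly: if a decl's function value is owned by a different decl, lowering recurses to the owner. Decl and the indices below are toy stand-ins for Module.Decl and Module.Decl.Index, not the real types.

    const std = @import("std");

    // Toy model: a decl whose value is owned by a different decl is an alias,
    // and lowering follows the chain to the owner (cf. lowerDeclRefValue below).
    const Decl = struct { value_owner: u32 };

    fn resolveAlias(decls: []const Decl, decl_index: u32) u32 {
        const owner = decls[decl_index].value_owner;
        return if (owner == decl_index) decl_index else resolveAlias(decls, owner);
    }

    test "const bar = foo; &bar lowers to foo" {
        const decls = [_]Decl{ .{ .value_owner = 0 }, .{ .value_owner = 0 } };
        try std.testing.expectEqual(@as(u32, 0), resolveAlias(&decls, 1));
    }
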
const decl = mod.declPtr(decl_index); - if (decl.val.castTag(.function)) |func| { - if (func.data.owner_decl != decl_index) { - return self.lowerDeclRefValue(tv, func.data.owner_decl); + if (decl.getFunction(mod)) |func| { + if (func.owner_decl != decl_index) { + return self.lowerDeclRefValue(tv, func.owner_decl); } - } else if (decl.val.castTag(.extern_fn)) |func| { - if (func.data.owner_decl != decl_index) { - return self.lowerDeclRefValue(tv, func.data.owner_decl); + } else if (decl.getExternFunc(mod)) |func| { + if (func.decl != decl_index) { + return self.lowerDeclRefValue(tv, func.decl); } } @@ -6333,11 +6189,11 @@ pub const FuncGen = struct { } fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const mod = self.dg.module; const dib = self.dg.object.di_builder orelse return null; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const func = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.dg.module; + const func = self.air.values[ty_pl.payload].getFunction(mod).?; const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); @@ -6395,8 +6251,8 @@ pub const FuncGen = struct { if (self.dg.object.di_builder == null) return null; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const func = self.air.values[ty_pl.payload].castTag(.function).?.data; const mod = self.dg.module; + const func = self.air.values[ty_pl.payload].getFunction(mod).?; const decl = mod.declPtr(func.owner_decl); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; @@ -8349,7 +8205,7 @@ pub const FuncGen = struct { } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const func = self.dg.decl.getFunction().?; + const func = self.dg.decl.getFunction(mod).?; const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( @@ -9147,7 +9003,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const llvm_ret_ty = try self.dg.lowerType(slice_ty); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); @@ -9861,7 +9717,7 @@ pub const FuncGen = struct { } const mod = self.dg.module; - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 612ac1f2527d..96c723989ae6 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -236,9 +236,9 @@ pub const DeclGen = struct { if (try self.air.value(inst, mod)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { - const fn_decl_index = switch (val.tag()) { - .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, - .function => val.castTag(.function).?.data.owner_decl, + const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, else => unreachable, }; const 
spv_decl_index = try self.resolveDecl(fn_decl_index); @@ -261,7 +261,7 @@ pub const DeclGen = struct { const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { // TODO: Extern fn? - const kind: SpvModule.DeclKind = if (decl.val.tag() == .function) + const kind: SpvModule.DeclKind = if (decl.getFunctionIndex(self.module) != .none) .func else .global; @@ -573,6 +573,7 @@ pub const DeclGen = struct { fn addDeclRef(self: *@This(), ty: Type, decl_index: Decl.Index) !void { const dg = self.dg; + const mod = dg.module; const ty_ref = try self.dg.resolveType(ty, .indirect); const ty_id = dg.typeId(ty_ref); @@ -580,8 +581,8 @@ pub const DeclGen = struct { const decl = dg.module.declPtr(decl_index); const spv_decl_index = try dg.resolveDecl(decl_index); - switch (decl.val.tag()) { - .function => { + switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + .func => { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by usize for now, // though. @@ -589,7 +590,7 @@ pub const DeclGen = struct { // TODO: Add dependency return; }, - .extern_fn => unreachable, // TODO + .extern_func => unreachable, // TODO else => { const result_id = dg.spv.allocId(); log.debug("addDeclRef: id = {}, index = {}, name = {s}", .{ result_id.id, @enumToInt(spv_decl_index), decl.name }); @@ -610,39 +611,23 @@ pub const DeclGen = struct { } } - fn lower(self: *@This(), ty: Type, val: Value) !void { + fn lower(self: *@This(), ty: Type, arg_val: Value) !void { const dg = self.dg; const mod = dg.module; - if (val.isUndef(mod)) { + var val = arg_val; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, + } + + if (val.isUndefDeep(mod)) { const size = ty.abiSize(mod); return try self.addUndef(size); } - switch (ty.zigTypeTag(mod)) { - .Int => try self.addInt(ty, val), - .Float => try self.addFloat(ty, val), - .Bool => try self.addConstBool(val.toBool(mod)), + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { .Array => switch (val.tag()) { - .aggregate => { - const elem_vals = val.castTag(.aggregate).?.data; - const elem_ty = ty.childType(mod); - const len = @intCast(u32, ty.arrayLenIncludingSentinel(mod)); // TODO: limit spir-v to 32 bit arrays in a more elegant way. 
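
This indexToKey dispatch is the pattern the patch applies throughout: resolve the value's ip_index through the intern pool to a key union and switch on the key, where the old code switched on a tag stored in the Value itself. A toy model of the shape of that dispatch; Key, Pool, and the indices are invented for illustration:

    const std = @import("std");

    // Toy intern pool: indices resolve to a key union; callers switch on the
    // key instead of on a Value tag (cf. indexToKey above).
    const Key = union(enum) { func: u32, extern_func: u32, int: u64 };

    const Pool = struct {
        items: []const Key,
        fn indexToKey(pool: Pool, index: u32) Key {
            return pool.items[index];
        }
    };

    test "dispatch on an interned key" {
        const pool = Pool{ .items = &.{ .{ .func = 7 }, .{ .int = 1 } } };
        const owner_decl: u32 = switch (pool.indexToKey(0)) {
            .func, .extern_func => |decl| decl,
            .int => unreachable,
        };
        try std.testing.expectEqual(@as(u32, 7), owner_decl);
    }
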
- for (elem_vals[0..len]) |elem_val| { - try self.lower(elem_ty, elem_val); - } - }, - .repeated => { - const elem_val = val.castTag(.repeated).?.data; - const elem_ty = ty.childType(mod); - const len = @intCast(u32, ty.arrayLen(mod)); - for (0..len) |_| { - try self.lower(elem_ty, elem_val); - } - if (ty.sentinel(mod)) |sentinel| { - try self.lower(elem_ty, sentinel); - } - }, .str_lit => { const str_lit = val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; @@ -657,29 +642,6 @@ pub const DeclGen = struct { }, else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}), }, - .Pointer => switch (val.tag()) { - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - try self.addDeclRef(ty, decl_index); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - try self.addDeclRef(ty, decl_index); - }, - .slice => { - const slice = val.castTag(.slice).?.data; - - const ptr_ty = ty.slicePtrFieldType(mod); - - try self.lower(ptr_ty, slice.ptr); - try self.addInt(Type.usize, slice.len); - }, - .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)), - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - try self.addInt(Type.usize, val); - }, - else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), - }, .Struct => { if (ty.isSimpleTupleOrAnonStruct(mod)) { unreachable; // TODO @@ -705,20 +667,134 @@ pub const DeclGen = struct { } } }, - .Optional => { + .Vector, + .Frame, + .AnyFrame, + => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try self.addConstBool(val.toBool(mod)), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => try self.addInt(ty, val), + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + try self.addConstInt(u16, @intCast(u16, kv.value)); + }, + .error_union => |error_union| { + const payload_ty = ty.errorUnionPayload(mod); + const is_pl = val.errorUnionIsPayload(mod); + const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); + + const eu_layout = dg.errorUnionLayout(payload_ty); + if (!eu_layout.payload_has_bits) { + return try self.lower(Type.anyerror, error_val); + } + + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiAlignment(mod); + const ty_size = ty.abiSize(mod); + const padding = ty_size - payload_size - error_size; + + const payload_val = switch (error_union.val) { + .err_name => try 
mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(); + + if (eu_layout.error_first) { + try self.lower(Type.anyerror, error_val); + try self.lower(payload_ty, payload_val); + } else { + try self.lower(payload_ty, payload_val); + try self.lower(Type.anyerror, error_val); + } + + try self.addUndef(padding); + }, + .enum_tag => { + const int_val = try val.enumToInt(ty, mod); + + const int_ty = try ty.intTagType(mod); + + try self.lower(int_ty, int_val); + }, + .float => try self.addFloat(ty, val), + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| try self.addDeclRef(ty, decl), + .mut_decl => |mut_decl| try self.addDeclRef(ty, mut_decl.decl), + else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), + } + if (ptr.len != .none) { + try self.addInt(Type.usize, ptr.len.toValue()); + } + }, + .opt => { const payload_ty = ty.optionalChild(mod); - const has_payload = !val.isNull(mod); + const payload_val = val.optionalValue(mod); const abi_size = ty.abiSize(mod); if (!payload_ty.hasRuntimeBits(mod)) { - try self.addConstBool(has_payload); + try self.addConstBool(payload_val != null); return; } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. - if (val.castTag(.opt_payload)) |payload| { - try self.lower(payload_ty, payload.data); - } else if (has_payload) { - try self.lower(payload_ty, val); + if (payload_val) |pl_val| { + try self.lower(payload_ty, pl_val); } else { const ptr_ty_ref = try dg.resolveType(ty, .indirect); try self.addNullPtr(ptr_ty_ref); @@ -734,27 +810,63 @@ pub const DeclGen = struct { const payload_size = payload_ty.abiSize(mod); const padding = abi_size - payload_size - 1; - if (val.castTag(.opt_payload)) |payload| { - try self.lower(payload_ty, payload.data); + if (payload_val) |pl_val| { + try self.lower(payload_ty, pl_val); } else { try self.addUndef(payload_size); } - try self.addConstBool(has_payload); + try self.addConstBool(payload_val != null); try self.addUndef(padding); }, - .Enum => { - const int_val = try val.enumToInt(ty, mod); + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => |array_type| { + const elem_ty = array_type.child.toType(); + switch (aggregate.storage) { + .bytes => |bytes| try self.addBytes(bytes), + .elems, .repeated_elem => { + for (0..array_type.len) |i| { + try self.lower(elem_ty, switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(), + .repeated_elem => |elem_val| elem_val.toValue(), + }); + } + }, + } + if (array_type.sentinel != .none) { + try self.lower(elem_ty, array_type.sentinel.toValue()); + } + }, + .vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), + .struct_type => { + const struct_ty = mod.typeToStruct(ty).?; - const int_ty = try ty.intTagType(mod); + if (struct_ty.layout == .Packed) { + return dg.todo("packed struct constants", .{}); + } - try self.lower(int_ty, int_val); + const struct_begin = self.size; + const field_vals = val.castTag(.aggregate).?.data; + for (struct_ty.fields.values(), 0..) |field, i| { + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; + try self.lower(field.ty, field_vals[i]); + + // Add padding if required. + // TODO: Add to type generation as well? 
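
The error-union serialization above picks an ordering from eu_layout.error_first and then pads out to the ABI size. A self-contained sketch of just that layout step, with hypothetical byte buffers and sizes:

    const std = @import("std");

    // Serialize error value and payload in layout order, then pad to abi_size
    // (mirroring the error_first branch and the final addUndef(padding) above).
    fn emitErrorUnion(
        buf: *std.ArrayList(u8),
        error_first: bool,
        err_bytes: []const u8,
        payload_bytes: []const u8,
        abi_size: usize,
    ) !void {
        if (error_first) {
            try buf.appendSlice(err_bytes);
            try buf.appendSlice(payload_bytes);
        } else {
            try buf.appendSlice(payload_bytes);
            try buf.appendSlice(err_bytes);
        }
        try buf.appendNTimes(0xaa, abi_size - err_bytes.len - payload_bytes.len);
    }

    test "error-first layout pads to the ABI size" {
        var buf = std.ArrayList(u8).init(std.testing.allocator);
        defer buf.deinit();
        try emitErrorUnion(&buf, true, &.{ 1, 0 }, &.{42}, 8);
        try std.testing.expectEqual(@as(usize, 8), buf.items.len);
    }
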
+ const unpadded_field_end = self.size - struct_begin; + const padded_field_end = ty.structFieldOffset(i + 1, mod); + const padding = padded_field_end - unpadded_field_end; + try self.addUndef(padding); + } + }, + .anon_struct_type => unreachable, // TODO + else => unreachable, }, - .Union => { - const tag_and_val = val.castTag(.@"union").?.data; + .un => |un| { const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { - return try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + return try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } const union_ty = mod.typeToUnion(ty).?; @@ -762,18 +874,18 @@ pub const DeclGen = struct { return dg.todo("packed union constants", .{}); } - const active_field = ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?; + const active_field = ty.unionTagFieldIndex(un.tag.toValue(), dg.module).?; const active_field_ty = union_ty.fields.values()[active_field].ty; const has_tag = layout.tag_size != 0; const tag_first = layout.tag_align >= layout.payload_align; if (has_tag and tag_first) { - try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { - try self.lower(active_field_ty, tag_and_val.val); + try self.lower(active_field_ty, un.val.toValue()); break :blk active_field_ty.abiSize(mod); } else 0; @@ -781,53 +893,11 @@ pub const DeclGen = struct { try self.addUndef(payload_padding_len); if (has_tag and !tag_first) { - try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } try self.addUndef(layout.padding); }, - .ErrorSet => switch (val.ip_index) { - .none => switch (val.tag()) { - .@"error" => { - const err_name = val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - try self.addConstInt(u16, @intCast(u16, kv.value)); - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| try self.addConstInt(u16, @intCast(u16, int.storage.u64)), - else => unreachable, - }, - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - const is_pl = val.errorUnionIsPayload(); - const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); - - const eu_layout = dg.errorUnionLayout(payload_ty); - if (!eu_layout.payload_has_bits) { - return try self.lower(Type.anyerror, error_val); - } - - const payload_size = payload_ty.abiSize(mod); - const error_size = Type.anyerror.abiAlignment(mod); - const ty_size = ty.abiSize(mod); - const padding = ty_size - payload_size - error_size; - - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; - - if (eu_layout.error_first) { - try self.lower(Type.anyerror, error_val); - try self.lower(payload_ty, payload_val); - } else { - try self.lower(payload_ty, payload_val); - try self.lower(Type.anyerror, error_val); - } - - try self.addUndef(padding); - }, - else => |tag| return dg.todo("indirect constant of type {s}", .{@tagName(tag)}), } } }; @@ -1542,7 +1612,7 @@ pub const DeclGen = struct { const decl_id = self.spv.declPtr(spv_decl_index).result_id; log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); - if (decl.val.castTag(.function)) |_| { + if (decl.getFunction(mod)) |_| { assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try 
self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ @@ -1595,8 +1665,8 @@ pub const DeclGen = struct { try self.generateTestEntryPoint(fqn, spv_decl_index); } } else { - const init_val = if (decl.val.castTag(.variable)) |payload| - payload.data.init + const init_val = if (decl.getVariable(mod)) |payload| + payload.init.toValue() else decl.val; diff --git a/src/link.zig b/src/link.zig index 1f34b0f760a3..a44a7387e9e6 100644 --- a/src/link.zig +++ b/src/link.zig @@ -564,7 +564,8 @@ pub const File = struct { } /// May be called before or after updateDeclExports for any given Decl. - pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void { + pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void { + const func = module.funcPtr(func_index); const owner_decl = module.declPtr(func.owner_decl); log.debug("updateFunc {*} ({s}), type={}", .{ owner_decl, owner_decl.name, owner_decl.ty.fmt(module), @@ -575,14 +576,14 @@ pub const File = struct { } switch (base.tag) { // zig fmt: off - .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), - .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), - .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), - .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), - .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), - .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), - .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func, air, liveness), + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func_index, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func_index, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func_index, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func_index, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func_index, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func_index, air, liveness), + .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func_index, air, liveness), // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 1a25bfe2317f..c871d8a02af9 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -87,12 +87,13 @@ pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void { } } -pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *C, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { const tracy = trace(@src()); defer tracy.end(); const gpa = self.base.allocator; + const func = module.funcPtr(func_index); const decl_index = func.owner_decl; const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) { @@ -111,7 +112,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes .value_map = codegen.CValueMap.init(gpa), .air = air, .liveness = 
liveness, - .func = func, + .func_index = func_index, .object = .{ .dg = .{ .gpa = gpa, @@ -555,7 +556,8 @@ fn flushDecl( export_names: std.StringHashMapUnmanaged(void), ) FlushDeclError!void { const gpa = self.base.allocator; - const decl = self.base.options.module.?.declPtr(decl_index); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); // Before flushing any particular Decl we must ensure its // dependencies are already flushed, so that the order in the .c // file comes out correctly. @@ -569,7 +571,7 @@ fn flushDecl( try self.flushLazyFns(f, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); - if (!(decl.isExtern() and export_names.contains(mem.span(decl.name)))) + if (!(decl.isExtern(mod) and export_names.contains(mem.span(decl.name)))) f.appendBufAssumeCapacity(decl_block.fwd_decl.items); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index efaeebc62e7a..f4ee2fde976c 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1032,18 +1032,19 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void { self.getAtomPtr(atom_index).sym_index = 0; } -pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| { - return llvm_object.updateFunc(mod, func, air, liveness); + return llvm_object.updateFunc(mod, func_index, air, liveness); } } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -1057,7 +1058,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, livenes const res = try codegen.generateFunction( &self.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_buffer, @@ -1155,11 +1156,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -1172,7 +1172,7 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, @@ -1313,7 +1313,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rdata_section_index.?; @@ -1425,7 +1425,7 @@ pub fn updateDeclExports( // detect the default subsystem. 
for (exports) |exp| { const exported_decl = mod.declPtr(exp.exported_decl); - if (exported_decl.getFunction() == null) continue; + if (exported_decl.getFunctionIndex(mod) == .none) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index ed2883f4daea..d6dd6979eaab 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -971,7 +971,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) // For functions we need to add a prologue to the debug line program. try dbg_line_buffer.ensureTotalCapacity(26); - const func = decl.val.castTag(.function).?.data; + const func = decl.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, @@ -1514,7 +1514,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons } } -pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void { +pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !void { const tracy = trace(@src()); defer tracy.end(); @@ -1522,8 +1522,8 @@ pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.De const atom = self.getAtom(.src_fn, atom_index); if (atom.len == 0) return; - const decl = module.declPtr(decl_index); - const func = decl.val.castTag(.function).?.data; + const decl = mod.declPtr(decl_index); + const func = decl.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index b27967884eb5..476b939038d6 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2465,7 +2465,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? 
.Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rodata_section_index.?; @@ -2574,17 +2574,18 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s return local_sym; } -pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -2599,11 +2600,11 @@ pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none); const code = switch (res) { .ok => code_buffer.items, @@ -2646,11 +2647,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
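
Each linker backend in this patch receives the same mechanical change: take a Module.Fn.Index instead of a *Module.Fn and resolve it up front. A toy model of that indirection; Fn and Module here are illustrative stand-ins, not the compiler's types:

    const std = @import("std");

    // Toy model of the func -> func_index migration: callers pass an index and
    // the callee resolves it through the module (cf. mod.funcPtr(func_index)).
    const Fn = struct { owner_decl: u32 };
    const Module = struct {
        funcs: []const Fn,
        fn funcPtr(mod: Module, index: u32) Fn {
            return mod.funcs[index];
        }
    };

    fn updateFunc(mod: Module, func_index: u32) u32 {
        const func = mod.funcPtr(func_index); // first step of every backend updateFunc
        return func.owner_decl;
    }

    test "resolve a function index through the module" {
        const mod = Module{ .funcs = &.{.{ .owner_decl = 3 }} };
        try std.testing.expectEqual(@as(u32, 3), updateFunc(mod, 0));
    }
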
} @@ -2667,7 +2667,7 @@ pub fn updateDecl( defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index e7723595dbe2..ffbdcdb91f02 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1847,16 +1847,17 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void { self.markRelocsDirtyByTarget(target); } -pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -1874,11 +1875,11 @@ pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, livene defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none); var code = switch (res) { .ok => code_buffer.items, @@ -1983,18 +1984,17 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
} } - const is_threadlocal = if (decl.val.castTag(.variable)) |payload| - payload.data.is_threadlocal and !self.base.options.single_threaded + const is_threadlocal = if (decl.getVariable(mod)) |variable| + variable.is_threadlocal and !self.base.options.single_threaded else false; if (is_threadlocal) return self.updateThreadlocalVariable(mod, decl_index); @@ -2012,7 +2012,7 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo null; defer if (decl_state) |*ds| ds.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2177,7 +2177,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const decl = module.declPtr(decl_index); const decl_metadata = self.decls.get(decl_index).?; - const decl_val = decl.val.castTag(.variable).?.data.init; + const decl_val = decl.getVariable(mod).?.init.toValue(); const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2278,8 +2278,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { } } - if (val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal and !single_threaded) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal and !single_threaded) { break :blk self.thread_data_section_index.?; } break :blk self.data_section_index.?; @@ -2289,7 +2289,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.data_const_section_index.?; diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 69cd73a6025f..b74518d930e3 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -68,9 +68,9 @@ pub fn deinit(self: *NvPtx) void { self.base.allocator.free(self.ptx_file_name); } -pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *NvPtx, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (!build_options.have_llvm) return; - try self.llvm_object.updateFunc(module, func, air, liveness); + try self.llvm_object.updateFunc(module, func_index, air, liveness); } pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: Module.Decl.Index) !void { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 968cbb0e7ec4..2071833b9356 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -276,11 +276,12 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); self.freeUnnamedConsts(decl_index); @@ -299,7 +300,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func: 
*Module.Fn, air: Air, livene const res = try codegen.generateFunction( &self.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_buffer, @@ -391,11 +392,10 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -407,7 +407,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -771,7 +771,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // in the deleteUnusedDecl function. const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const is_fn = (decl.val.tag() == .function); + const is_fn = decl.getFunctionIndex(mod) != .none; if (is_fn) { var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index da25753b9501..0a6608303e09 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -103,11 +103,13 @@ pub fn deinit(self: *SpirV) void { self.decl_link.deinit(); } -pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *SpirV, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const func = module.funcPtr(func_index); + var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &self.spv, &self.decl_link); defer decl_gen.deinit(); @@ -136,7 +138,7 @@ pub fn updateDeclExports( exports: []const *Module.Export, ) !void { const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) { + if (decl.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. 
const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index ef97a7fa7f33..78d1be978b20 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1324,17 +1324,18 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 { return index; } -pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const atom_index = try wasm.getOrCreateAtomForDecl(decl_index); @@ -1358,7 +1359,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes const result = try codegen.generateFunction( &wasm.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_writer, @@ -1403,9 +1404,9 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi defer tracy.end(); const decl = mod.declPtr(decl_index); - if (decl.val.castTag(.function)) |_| { + if (decl.getFunction(mod)) |_| { return; - } else if (decl.val.castTag(.extern_fn)) |_| { + } else if (decl.getExternFunc(mod)) |_| { return; } @@ -1413,12 +1414,13 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi const atom = wasm.getAtomPtr(atom_index); atom.clear(); - if (decl.isExtern()) { - const variable = decl.getVariable().?; + if (decl.isExtern(mod)) { + const variable = decl.getVariable(mod).?; const name = mem.sliceTo(decl.name, 0); - return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null); + const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name); + return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null); } - const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; var code_writer = std.ArrayList(u8).init(wasm.base.allocator); defer code_writer.deinit(); @@ -1791,7 +1793,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void { assert(wasm.symbol_atom.remove(local_atom.symbolLoc())); } - if (decl.isExtern()) { + if (decl.isExtern(mod)) { _ = wasm.imports.remove(atom.symbolLoc()); } _ = wasm.resolved_symbols.swapRemove(atom.symbolLoc()); @@ -1852,7 +1854,7 @@ pub fn addOrUpdateImport( /// Symbol index that is external symbol_index: u32, /// Optional library name (i.e. `extern "c" fn foo() void` - lib_name: ?[*:0]const u8, + lib_name: ?[:0]const u8, /// The index of the type that represents the function signature /// when the extern is a function. When this is null, a data-symbol /// is asserted instead. @@ -1863,7 +1865,7 @@ pub fn addOrUpdateImport( // Also mangle the name when the lib name is set and not equal to "C" so imports with the same // name but different module can be resolved correctly. 
const mangle_name = lib_name != null and - !std.mem.eql(u8, std.mem.sliceTo(lib_name.?, 0), "c"); + !std.mem.eql(u8, lib_name.?, "c"); const full_name = if (mangle_name) full_name: { break :full_name try std.fmt.allocPrint(wasm.base.allocator, "{s}|{s}", .{ name, lib_name.? }); } else name; @@ -1889,7 +1891,7 @@ pub fn addOrUpdateImport( if (type_index) |ty_index| { const gop = try wasm.imports.getOrPut(wasm.base.allocator, .{ .index = symbol_index, .file = null }); const module_name = if (lib_name) |l_name| blk: { - break :blk mem.sliceTo(l_name, 0); + break :blk l_name; } else wasm.host_name; if (!gop.found_existing) { gop.value_ptr.* = .{ @@ -2931,7 +2933,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const mod = wasm.base.options.module.?; atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; @@ -2988,7 +2990,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { for (mod.error_name_list.items) |error_name| { const len = @intCast(u32, error_name.len + 1); // names are 0-terminated - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const offset = @intCast(u32, atom.code.items.len); // first we create the data for the slice of the name try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated @@ -3366,15 +3368,15 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod var decl_it = wasm.decls.iterator(); while (decl_it.next()) |entry| { const decl = mod.declPtr(entry.key_ptr.*); - if (decl.isExtern()) continue; + if (decl.isExtern(mod)) continue; const atom_index = entry.value_ptr.*; const atom = wasm.getAtomPtr(atom_index); if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); - } else if (decl.getVariable()) |variable| { - if (!variable.is_mutable) { + } else if (decl.getVariable(mod)) |variable| { + if (variable.is_const) { try wasm.parseAtom(atom_index, .{ .data = .read_only }); - } else if (variable.init.isUndefDeep(mod)) { + } else if (variable.init.toValue().isUndefDeep(mod)) { // for safe build modes, we store the atom in the data segment, // whereas for unsafe build modes we store it in bss. 
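
The import-mangling rule above boils down to: a library name other than "c" keys the symbol as "name|lib", so same-named imports from different modules stay distinct. A minimal sketch of that rule; the allocation details are illustrative:

    const std = @import("std");

    // Imports with a library name other than "c" are stored as "name|lib"
    // (mirroring the mangle_name / full_name logic above).
    fn mangleImportName(gpa: std.mem.Allocator, name: []const u8, lib_name: ?[]const u8) ![]u8 {
        if (lib_name) |lib| {
            if (!std.mem.eql(u8, lib, "c")) {
                return std.fmt.allocPrint(gpa, "{s}|{s}", .{ name, lib });
            }
        }
        return gpa.dupe(u8, name);
    }

    test "imports from non-c modules are mangled" {
        const gpa = std.testing.allocator;
        const mangled = try mangleImportName(gpa, "foo", "env");
        defer gpa.free(mangled);
        try std.testing.expectEqualStrings("foo|env", mangled);
    }
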
const is_initialized = wasm.base.options.optimize_mode == .Debug or diff --git a/src/print_air.zig b/src/print_air.zig index ef52b4c085eb..9169a88bbcc0 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -699,8 +699,8 @@ const Writer = struct { fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const function = w.air.values[ty_pl.payload].castTag(.function).?.data; - const owner_decl = w.module.declPtr(function.owner_decl); + const func_index = w.module.intern_pool.indexToFunc(w.air.values[ty_pl.payload].ip_index); + const owner_decl = w.module.declPtr(w.module.funcPtrUnwrap(func_index).?.owner_decl); try s.print("{s}", .{owner_decl.name}); } diff --git a/src/type.zig b/src/type.zig index f2fad91eba2b..087dc88c30fb 100644 --- a/src/type.zig +++ b/src/type.zig @@ -93,16 +93,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .simple_value => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }; } @@ -358,7 +365,7 @@ pub const Type = struct { const func = ies.func; try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(func.owner_decl); + const owner_decl = mod.declPtr(mod.funcPtr(func).owner_decl); try owner_decl.renderFullyQualifiedName(mod, writer); try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); }, @@ -467,16 +474,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, } } @@ -675,16 +689,23 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -777,16 +798,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }; } @@ -866,8 +894,8 @@ 
pub const Type = struct { /// May capture a reference to `ty`. /// Returned value has type `comptime_int`. - pub fn lazyAbiAlignment(ty: Type, mod: *Module, arena: Allocator) !Value { - switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) { + pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { .val => |val| return val, .scalar => |x| return mod.intValue(Type.comptime_int, x), } @@ -880,7 +908,7 @@ pub const Type = struct { pub const AbiAlignmentAdvancedStrat = union(enum) { eager, - lazy: Allocator, + lazy, sema: *Sema, }; @@ -1019,16 +1047,18 @@ pub const Type = struct { if (!struct_obj.haveFieldTypes()) switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }; if (struct_obj.layout == .Packed) { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - } - }, + .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, .eager => {}, } assert(struct_obj.haveLayout()); @@ -1039,7 +1069,10 @@ pub const Type = struct { var big_align: u32 = 0; for (fields.values()) |field| { if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) continue; @@ -1050,7 +1083,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, }; big_align = @max(big_align, field_align); @@ -1077,7 +1113,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // field type alignment not resolved .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, } } @@ -1092,16 +1131,23 @@ pub const Type = struct { .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + 
.aggregate, + .un, + => unreachable, }, } } @@ -1118,7 +1164,10 @@ pub const Type = struct { switch (strat) { .eager, .sema => { if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { return AbiAlignmentAdvanced{ .scalar = code_align }; @@ -1128,7 +1177,7 @@ pub const Type = struct { (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, ) }; }, - .lazy => |arena| { + .lazy => { switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |payload_align| { return AbiAlignmentAdvanced{ @@ -1137,7 +1186,10 @@ pub const Type = struct { }, .val => {}, } - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; + return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }; }, } } @@ -1160,16 +1212,22 @@ pub const Type = struct { switch (strat) { .eager, .sema => { if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { return AbiAlignmentAdvanced{ .scalar = 1 }; } return child_type.abiAlignmentAdvanced(mod, strat); }, - .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, - .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .val => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, } } @@ -1198,7 +1256,10 @@ pub const Type = struct { if (!union_obj.haveFieldTypes()) switch (strat) { .eager => unreachable, // union layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }; if (union_obj.fields.count() == 0) { if (have_tag) { @@ -1212,7 +1273,10 @@ pub const Type = struct { if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod); for (union_obj.fields.values()) |field| { if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) continue; @@ -1223,7 +1287,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + 
.storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, }; max_align = @max(max_align, field_align); @@ -1232,8 +1299,8 @@ pub const Type = struct { } /// May capture a reference to `ty`. - pub fn lazyAbiSize(ty: Type, mod: *Module, arena: Allocator) !Value { - switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) { + pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { + switch (try ty.abiSizeAdvanced(mod, .lazy)) { .val => |val| return val, .scalar => |x| return mod.intValue(Type.comptime_int, x), } @@ -1283,7 +1350,10 @@ pub const Type = struct { .scalar => |elem_size| return .{ .scalar = len * elem_size }, .val => switch (strat) { .sema, .eager => unreachable, - .lazy => |arena| return .{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, } }, @@ -1291,9 +1361,10 @@ pub const Type = struct { const opt_sema = switch (strat) { .sema => |sema| sema, .eager => null, - .lazy => |arena| return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(arena, ty), - }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }; const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); const elem_bits = @intCast(u32, elem_bits_u64); @@ -1301,9 +1372,10 @@ pub const Type = struct { const total_bytes = (total_bits + 7) / 8; const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| x, - .val => return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(strat.lazy, ty), - }, + .val => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }; const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); return AbiSizeAdvanced{ .scalar = result }; @@ -1320,7 +1392,10 @@ pub const Type = struct { // in abiAlignmentAdvanced. const code_size = abiSize(Type.anyerror, mod); if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { // Same as anyerror. 
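Every `.lazy` branch in the hunks above and below makes the same mechanical substitution: the arena-allocated `Value.Tag.lazy_align`/`Value.Tag.lazy_size` payload becomes an interned `comptime_int` whose integer storage records which type the value is lazy over. A minimal sketch of the pattern, factored into a helper (the helper name is hypothetical; `mod.intern` and the lazy storage fields are the ones this patch introduces):

    // Hypothetical helper: intern a lazy ABI-size value for `ty`.
    // No digits are stored yet; `.lazy_size` defers the computation
    // until the type's layout is resolved.
    fn lazySizeValue(mod: *Module, ty: Type) !Value {
        return (try mod.intern(.{ .int = .{
            .ty = .comptime_int_type,
            .storage = .{ .lazy_size = ty.ip_index },
        } })).toValue();
    }

Because the interned value owns its own storage, the `.lazy` strategy no longer needs an arena, which is why `AbiAlignmentAdvancedStrat.lazy` loses its `Allocator` payload earlier in this file.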
@@ -1333,7 +1408,10 @@ pub const Type = struct { .val => switch (strat) { .sema => unreachable, .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, }; @@ -1420,11 +1498,10 @@ pub const Type = struct { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, + .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, .eager => {}, } assert(struct_obj.haveLayout()); @@ -1433,12 +1510,13 @@ pub const Type = struct { else => { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { + .lazy => { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return AbiSizeAdvanced{ .scalar = 0 }; - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } + if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }; }, .eager => {}, } @@ -1469,16 +1547,23 @@ pub const Type = struct { .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, } } @@ -1492,11 +1577,10 @@ pub const Type = struct { ) Module.CompileError!AbiSizeAdvanced { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!union_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, + .lazy => if (!union_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, .eager => {}, } return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) }; @@ -1514,7 +1598,10 @@ pub const Type = struct { } if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) return AbiSizeAdvanced{ .scalar = 1 }; @@ -1527,7 +1614,10 @@ pub const Type = struct { .val => switch (strat) { .sema => unreachable, .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, }; @@ -1690,16 +1780,23 @@ pub const Type = struct { .enum_type => |enum_type| return 
bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, } } @@ -2270,16 +2367,23 @@ pub const Type = struct { .opaque_type => unreachable, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2443,16 +2547,17 @@ pub const Type = struct { .inferred_error_set_type, => return null, - .array_type => |array_type| { - if (array_type.len == 0) - return Value.initTag(.empty_array); - if ((try array_type.child.toType().onePossibleValue(mod)) != null) - return Value.initTag(.the_only_possible_value); - return null; - }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return Value.initTag(.empty_array); - if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v; + inline .array_type, .vector_type => |seq_type| { + if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue(); + if (try seq_type.child.toType().onePossibleValue(mod)) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = opv.ip_index }, + } })).toValue(); + } return null; }, .opt_type => |child| { @@ -2595,16 +2700,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2733,16 +2845,23 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2802,13 +2921,12 @@ pub const Type = struct { } // Works for vectors and vectors of integers. 
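// Note the special case dropped from minInt/maxInt below: the legacy code
// skipped wrapping a `the_only_possible_value` scalar in a `repeated`
// payload, presumably because that payload had to be arena-allocated. With
// the InternPool the splat is canonical and cheap, so every vector result
// now takes the same path; e.g. the minimum of `@Vector(4, u8)` interns as
// a repeated-element aggregate of the scalar zero (sketch, using the
// `Key.Aggregate` storage this patch introduces):
//
//     (try mod.intern(.{ .aggregate = .{
//         .ty = ty.ip_index, // the vector type
//         .storage = .{ .repeated_elem = scalar.ip_index }, // u8 value 0
//     } })).toValue();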
- pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value { + pub fn minInt(ty: Type, mod: *Module) !Value { const scalar = try minIntScalar(ty.scalarType(mod), mod); - if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { - return Value.Tag.repeated.create(arena, scalar); - } else { - return scalar; - } + return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = scalar.ip_index }, + } })).toValue() else scalar; } /// Asserts that the type is an integer. @@ -2832,13 +2950,12 @@ pub const Type = struct { // Works for vectors and vectors of integers. /// The returned Value will have type dest_ty. - pub fn maxInt(ty: Type, arena: Allocator, mod: *Module, dest_ty: Type) !Value { + pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty); - if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { - return Value.Tag.repeated.create(arena, scalar); - } else { - return scalar; - } + return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = scalar.ip_index }, + } })).toValue() else scalar; } /// The returned Value will have type dest_ty. @@ -3386,12 +3503,12 @@ pub const Type = struct { pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; - pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type }; + pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type, }; - pub const const_slice_u8_sentinel_0: Type = .{ .ip_index = .const_slice_u8_sentinel_0_type }; + pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; diff --git a/src/value.zig b/src/value.zig index b1c94d46b5e2..47215e588cbc 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,64 +33,12 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. - /// The only possible value for a particular type, which is stored externally. - the_only_possible_value, - - empty_array, // See last_no_payload_tag below. // After this, the tag requires a payload. - function, - extern_fn, - /// A comptime-known pointer can point to the address of a global - /// variable. The child element value in this case will have this tag. - variable, - /// A wrapper for values which are comptime-known but should - /// semantically be runtime-known. - runtime_value, - /// Represents a pointer to a Decl. - /// When machine codegen backend sees this, it must set the Decl's `alive` field to true. - decl_ref, - /// Pointer to a Decl, but allows comptime code to mutate the Decl's Value. - /// This Tag will never be seen by machine codegen backends. It is changed into a - /// `decl_ref` when a comptime variable goes out of scope. - decl_ref_mut, - /// Behaves like `decl_ref_mut` but validates that the stored value matches the field value. 
- comptime_field_ptr, - /// Pointer to a specific element of an array, vector or slice. - elem_ptr, - /// Pointer to a specific field of a struct or union. - field_ptr, /// A slice of u8 whose memory is managed externally. bytes, /// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`. str_lit, - /// This value is repeated some number of times. The amount of times to repeat - /// is stored externally. - repeated, - /// An array with length 0 but it has a sentinel. - empty_array_sentinel, - /// Pointer and length as sub `Value` objects. - slice, - enum_literal, - @"error", - /// When the type is error union: - /// * If the tag is `.@"error"`, the error union is an error. - /// * If the tag is `.eu_payload`, the error union is a payload. - /// * A nested error such as `anyerror!(anyerror!T)` in which the the outer error union - /// is non-error, but the inner error union is an error, is represented as - /// a tag of `.eu_payload`, with a sub-tag of `.@"error"`. - eu_payload, - /// A pointer to the payload of an error union, based on a pointer to an error union. - eu_payload_ptr, - /// When the type is optional: - /// * If the tag is `.null_value`, the optional is null. - /// * If the tag is `.opt_payload`, the optional is a payload. - /// * A nested optional such as `??T` in which the the outer optional - /// is non-null, but the inner optional is null, is represented as - /// a tag of `.opt_payload`, with a sub-tag of `.null_value`. - opt_payload, - /// A pointer to the payload of an optional, based on a pointer to an optional. - opt_payload_ptr, /// An instance of a struct, array, or vector. /// Each element/field stored as a `Value`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -104,57 +52,19 @@ pub const Value = struct { /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc /// instructions for comptime code. inferred_alloc_comptime, - /// The ABI alignment of the payload type. - lazy_align, - /// The ABI size of the payload type. 
- lazy_size, - pub const last_no_payload_tag = Tag.empty_array; - pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; + pub const no_payload_count = 0; pub fn Type(comptime t: Tag) type { return switch (t) { - .the_only_possible_value, - .empty_array, - => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), - - .extern_fn => Payload.ExternFn, - - .decl_ref => Payload.Decl, - - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - .runtime_value, - => Payload.SubValue, - - .eu_payload_ptr, - .opt_payload_ptr, - => Payload.PayloadPtr, - - .bytes, - .enum_literal, - => Payload.Bytes, + .bytes => Payload.Bytes, .str_lit => Payload.StrLit, - .slice => Payload.Slice, - - .lazy_align, - .lazy_size, - => Payload.Ty, - - .function => Payload.Function, - .variable => Payload.Variable, - .decl_ref_mut => Payload.DeclRefMut, - .elem_ptr => Payload.ElemPtr, - .field_ptr => Payload.FieldPtr, - .@"error" => Payload.Error, + .inferred_alloc => Payload.InferredAlloc, .inferred_alloc_comptime => Payload.InferredAllocComptime, .aggregate => Payload.Aggregate, .@"union" => Payload.Union, - .comptime_field_ptr => Payload.ComptimeFieldPtr, }; } @@ -249,91 +159,6 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .the_only_possible_value, - .empty_array, - => unreachable, - - .lazy_align, .lazy_size => { - const payload = self.cast(Payload.Ty).?; - const new_payload = try arena.create(Payload.Ty); - new_payload.* = .{ - .base = payload.base, - .data = payload.data, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .function => return self.copyPayloadShallow(arena, Payload.Function), - .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn), - .variable => return self.copyPayloadShallow(arena, Payload.Variable), - .decl_ref => return self.copyPayloadShallow(arena, Payload.Decl), - .decl_ref_mut => return self.copyPayloadShallow(arena, Payload.DeclRefMut), - .eu_payload_ptr, - .opt_payload_ptr, - => { - const payload = self.cast(Payload.PayloadPtr).?; - const new_payload = try arena.create(Payload.PayloadPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = payload.data.container_ty, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .comptime_field_ptr => { - const payload = self.cast(Payload.ComptimeFieldPtr).?; - const new_payload = try arena.create(Payload.ComptimeFieldPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .field_val = try payload.data.field_val.copy(arena), - .field_ty = payload.data.field_ty, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .elem_ptr => { - const payload = self.castTag(.elem_ptr).?; - const new_payload = try arena.create(Payload.ElemPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .array_ptr = try payload.data.array_ptr.copy(arena), - .elem_ty = payload.data.elem_ty, - .index = payload.data.index, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .field_ptr => { - const payload = self.castTag(.field_ptr).?; - const new_payload = try arena.create(Payload.FieldPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .container_ptr = try 
payload.data.container_ptr.copy(arena), - .container_ty = payload.data.container_ty, - .field_index = payload.data.field_index, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, .bytes => { const bytes = self.castTag(.bytes).?.data; const new_payload = try arena.create(Payload.Bytes); @@ -347,52 +172,6 @@ pub const Value = struct { }; }, .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit), - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - .runtime_value, - => { - const payload = self.cast(Payload.SubValue).?; - const new_payload = try arena.create(Payload.SubValue); - new_payload.* = .{ - .base = payload.base, - .data = try payload.data.copy(arena), - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .slice => { - const payload = self.castTag(.slice).?; - const new_payload = try arena.create(Payload.Slice); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .ptr = try payload.data.ptr.copy(arena), - .len = try payload.data.len.copy(arena), - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .enum_literal => { - const payload = self.castTag(.enum_literal).?; - const new_payload = try arena.create(Payload.Bytes); - new_payload.* = .{ - .base = payload.base, - .data = try arena.dupe(u8, payload.data), - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .@"error" => return self.copyPayloadShallow(arena, Payload.Error), - .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -453,7 +232,7 @@ pub const Value = struct { pub fn dump( start_val: Value, comptime fmt: []const u8, - options: std.fmt.FormatOptions, + _: std.fmt.FormatOptions, out_stream: anytype, ) !void { comptime assert(fmt.len == 0); @@ -469,44 +248,6 @@ pub const Value = struct { .@"union" => { return out_stream.writeAll("(union value)"); }, - .the_only_possible_value => return out_stream.writeAll("(the only possible value)"), - .lazy_align => { - try out_stream.writeAll("@alignOf("); - try val.castTag(.lazy_align).?.data.dump("", options, out_stream); - return try out_stream.writeAll(")"); - }, - .lazy_size => { - try out_stream.writeAll("@sizeOf("); - try val.castTag(.lazy_size).?.data.dump("", options, out_stream); - return try out_stream.writeAll(")"); - }, - .runtime_value => return out_stream.writeAll("[runtime value]"), - .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}), - .extern_fn => return out_stream.writeAll("(extern function)"), - .variable => return out_stream.writeAll("(variable)"), - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - return out_stream.print("(decl_ref_mut {d})", .{decl_index}); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - return out_stream.print("(decl_ref {d})", .{decl_index}); - }, - .comptime_field_ptr => { - return out_stream.writeAll("(comptime_field_ptr)"); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try out_stream.print("&[{}] ", .{elem_ptr.index}); - val = elem_ptr.array_ptr; - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try out_stream.print("fieldptr({d}) ", .{field_ptr.field_index}); - val = field_ptr.container_ptr; - }, - .empty_array => return out_stream.writeAll(".{}"), - 
.enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { const str_lit = val.castTag(.str_lit).?.data; @@ -514,31 +255,8 @@ pub const Value = struct { str_lit.index, str_lit.len, }); }, - .repeated => { - try out_stream.writeAll("(repeated) "); - val = val.castTag(.repeated).?.data; - }, - .empty_array_sentinel => return out_stream.writeAll("(empty array with sentinel)"), - .slice => return out_stream.writeAll("(slice)"), - .@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - try out_stream.writeAll("(eu_payload) "); - val = val.castTag(.eu_payload).?.data; - }, - .opt_payload => { - try out_stream.writeAll("(opt_payload) "); - val = val.castTag(.opt_payload).?.data; - }, .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), - .eu_payload_ptr => { - try out_stream.writeAll("(eu_payload_ptr)"); - val = val.castTag(.eu_payload_ptr).?.data.container_ptr; - }, - .opt_payload_ptr => { - try out_stream.writeAll("(opt_payload_ptr)"); - val = val.castTag(.opt_payload_ptr).?.data.container_ptr; - }, }; } @@ -569,30 +287,23 @@ pub const Value = struct { const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; return allocator.dupe(u8, bytes); }, - .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), - .repeated => { - const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); - @memset(result, byte); - return result; - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - const decl_val = try decl.value(); - return decl_val.toAllocatedBytes(decl.ty, allocator, mod); - }, - .the_only_possible_value => return &[_]u8{}, - .slice => { - const slice = val.castTag(.slice).?.data; - return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); - }, else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), .ptr => |ptr| switch (ptr.len) { .none => unreachable, - else => return arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try allocator.dupe(u8, bytes), + .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + @memset(result, byte); + return result; + }, }, else => unreachable, }, @@ -611,29 +322,6 @@ pub const Value = struct { pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index); switch (val.tag()) { - .elem_ptr => { - const pl = val.castTag(.elem_ptr).?.data; - return mod.intern(.{ .ptr = .{ 
- .ty = ty.ip_index, - .addr = .{ .elem = .{ - .base = pl.array_ptr.ip_index, - .index = pl.index, - } }, - } }); - }, - .slice => { - const pl = val.castTag(.slice).?.data; - const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod); - return mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, - .addr = mod.intern_pool.indexToKey(ptr).ptr.addr, - .len = try pl.len.intern(Type.usize, mod), - } }); - }, - .opt_payload => return mod.intern(.{ .opt = .{ - .ty = ty.ip_index, - .val = try val.castTag(.opt_payload).?.data.intern(ty.childType(mod), mod), - } }), .aggregate => { const old_elems = val.castTag(.aggregate).?.data; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); @@ -651,13 +339,6 @@ pub const Value = struct { .storage = .{ .elems = new_elems }, } }); }, - .repeated => return mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = try val.castTag(.repeated).?.data.intern( - ty.structFieldType(0, mod), - mod, - ) }, - } }), .@"union" => { const pl = val.castTag(.@"union").?.data; return mod.intern(.{ .un = .{ @@ -679,7 +360,6 @@ pub const Value = struct { for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); return Tag.aggregate.create(arena, new_elems); }, - .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()), }, else => return val, } @@ -698,31 +378,21 @@ pub const Value = struct { pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { const ip = &mod.intern_pool; switch (val.ip_index) { - .none => { - const field_index = switch (val.tag()) { - .the_only_possible_value => blk: { - assert(ty.enumFieldCount(mod) == 1); - break :blk 0; - }, - .enum_literal => i: { - const name = val.castTag(.enum_literal).?.data; - break :i ty.enumFieldIndex(name, mod).?; - }, - else => unreachable, - }; - return switch (ip.indexToKey(ty.ip_index)) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_type => |enum_type| if (enum_type.values.len != 0) - enum_type.values[field_index].toValue() - else // Field index and integer values are the same. - mod.intValue(enum_type.tag_ty.toType(), field_index), - else => unreachable, - }; - }, else => return switch (ip.indexToKey(ip.typeOf(val.ip_index))) { // Assume it is already an integer and return it directly. .simple_type, .int_type => val, + .enum_literal => |enum_literal| { + const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; + return switch (ip.indexToKey(ty.ip_index)) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + enum_type.values[field_index].toValue() + else // Field index and integer values are the same. 
+ mod.intValue(enum_type.tag_ty.toType(), field_index), + else => unreachable, + }; + }, .enum_type => |enum_type| (try ip.getCoerced( mod.gpa, val.ip_index, @@ -733,18 +403,12 @@ pub const Value = struct { } } - pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { - _ = ty; // TODO: remove this parameter now that we use InternPool - - if (val.castTag(.enum_literal)) |payload| { - return payload.data; - } - + pub fn tagName(val: Value, mod: *Module) []const u8 { const ip = &mod.intern_pool; - const enum_tag = switch (ip.indexToKey(val.ip_index)) { .un => |un| ip.indexToKey(un.tag).enum_tag, .enum_tag => |x| x, + .enum_literal => |name| return ip.stringToSlice(name), else => unreachable, }; const enum_type = ip.indexToKey(enum_tag.ty).enum_type; @@ -773,49 +437,61 @@ pub const Value = struct { .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), .undef => unreachable, .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => BigIntMutable.init(&space.limbs, 0).toConst(), - - .runtime_value => { - const sub_val = val.castTag(.runtime_value).?.data; - return sub_val.toBigIntAdvanced(space, mod, opt_sema); - }, - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiAlignment(mod); - return BigIntMutable.init(&space.limbs, x).toConst(); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiSize(mod); - return BigIntMutable.init(&space.limbs, x).toConst(); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => int.storage.toBigInt(space), + .lazy_align, .lazy_size => |ty| { + if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType()); + const x = switch (int.storage) { + else => unreachable, + .lazy_align => ty.toType().abiAlignment(mod), + .lazy_size => ty.toType().abiSize(mod), + }; + return BigIntMutable.init(&space.limbs, x).toConst(); + }, }, - - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?; - const elem_size = elem_ptr.elem_ty.abiSize(mod); - const new_addr = array_addr + elem_size * elem_ptr.index; - return BigIntMutable.init(&space.limbs, new_addr).toConst(); + .enum_tag => |enum_tag| enum_tag.int.toValue().toBigIntAdvanced(space, mod, opt_sema), + .ptr => |ptr| switch (ptr.len) { + .none => switch (ptr.addr) { + .int => |int| int.toValue().toBigIntAdvanced(space, mod, opt_sema), + .elem => |elem| { + const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)).?; + const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod); + const new_addr = base_addr + elem.index * elem_size; + return BigIntMutable.init(&space.limbs, new_addr).toConst(); + }, + else => unreachable, + }, + else => unreachable, }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| int.storage.toBigInt(space), - .enum_tag => |enum_tag| mod.intern_pool.indexToKey(enum_tag.int).int.storage.toBigInt(space), else => unreachable, }, }; } + pub fn getFunction(val: Value, mod: *Module) ?*Module.Fn { + return mod.funcPtrUnwrap(val.getFunctionIndex(mod)); + } + + pub fn getFunctionIndex(val: 
Value, mod: *Module) Module.Fn.OptionalIndex { + return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.ip_index) else .none; + } + + pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| extern_func, + else => null, + } else null; + } + + pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable, + else => null, + } else null; + } + /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { @@ -825,42 +501,27 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, + return switch (val.ip_index) { + .bool_false => 0, + .bool_true => 1, .undef => unreachable, - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => return 0, - - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; - } else { - return ty.abiAlignment(mod); - } - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar; - } else { - return ty.abiSize(mod); - } - }, - - else => return null, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, .i64 => |x| std.math.cast(u64, x), + .lazy_align => |ty| if (opt_sema) |sema| + (try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar + else + ty.toType().abiAlignment(mod), + .lazy_size => |ty| if (opt_sema) |sema| + (try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar + else + ty.toType().abiSize(mod), }, else => null, }, - } + }; } /// Asserts the value is an integer and it fits in a u64 @@ -870,58 +531,40 @@ pub const Value = struct { /// Asserts the value is an integer and it fits in a i64 pub fn toSignedInt(val: Value, mod: *Module) i64 { - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, + return switch (val.ip_index) { + .bool_false => 0, + .bool_true => 1, .undef => unreachable, - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => return 0, - - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return @intCast(i64, ty.abiAlignment(mod)); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return @intCast(i64, ty.abiSize(mod)); - }, - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, .u64 => |x| @intCast(i64, x), + .lazy_align => |ty| @intCast(i64, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @intCast(i64, ty.toType().abiSize(mod)), }, else => unreachable, }, - } + }; } - pub fn toBool(val: Value, mod: *const Module) bool { + pub fn toBool(val: Value, _: *const Module) 
bool { return switch (val.ip_index) { .bool_true => true, .bool_false => false, - .none => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| !big_int.eqZero(), - inline .u64, .i64 => |x| x != 0, - }, - else => unreachable, - }, + else => unreachable, }; } - fn isDeclRef(val: Value) bool { + fn isDeclRef(val: Value, mod: *Module) bool { var check = val; - while (true) switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return true, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, + while (true) switch (mod.intern_pool.indexToKey(check.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => return true, + .eu_payload, .opt_payload => |index| check = index.toValue(), + .elem, .field => |base_index| check = base_index.base.toValue(), + else => return false, + }, else => return false, }; } @@ -953,24 +596,9 @@ pub const Value = struct { const bits = int_info.bits; const byte_count = (bits + 7) / 8; - const int_val = try val.enumToInt(ty, mod); - - if (byte_count <= @sizeOf(u64)) { - const ip_key = mod.intern_pool.indexToKey(int_val.ip_index); - const int: u64 = switch (ip_key.int.storage) { - .u64 => |x| x, - .i64 => |x| @bitCast(u64, x), - .big_int => unreachable, - }; - for (buffer[0..byte_count], 0..) |_, i| switch (endian) { - .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), - .Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), - }; - } else { - var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, mod); - bigint.writeTwosComplement(buffer[0..byte_count], endian); - } + var bigint_buffer: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buffer, mod); + bigint.writeTwosComplement(buffer[0..byte_count], endian); }, .Float => switch (ty.floatBits(target)) { 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian), @@ -1016,7 +644,12 @@ pub const Value = struct { .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; - const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?; + const name = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .err => |err| err.name, + .error_union => |error_union| error_union.val.err_name, + else => unreachable, + }; + const int = mod.global_error_set.get(mod.intern_pool.stringToSlice(name)).?; std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); }, .Union => switch (ty.containerLayout(mod)) { @@ -1029,7 +662,7 @@ pub const Value = struct { }, .Pointer => { if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; - if (val.isDeclRef()) return error.ReinterpretDeclRef; + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; return val.writeToMemory(Type.usize, mod, buffer); }, .Optional => { @@ -1141,14 +774,14 @@ pub const Value = struct { .Packed => { const field_index = ty.unionTagFieldIndex(val.unionTag(mod), mod); const field_type = ty.unionFields(mod).values()[field_index.?].ty; - const field_val = try val.fieldValue(field_type, mod, field_index.?); + const field_val = try val.fieldValue(mod, field_index.?); return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); }, }, 
.Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. - if (val.isDeclRef()) return error.ReinterpretDeclRef; + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); }, .Optional => { @@ -1262,13 +895,11 @@ pub const Value = struct { // TODO revisit this when we have the concept of the error tag type const Int = u16; const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); - - const payload = try arena.create(Value.Payload.Error); - payload.* = .{ - .base = .{ .tag = .@"error" }, - .data = .{ .name = mod.error_name_list.items[@intCast(usize, int)] }, - }; - return Value.initPayload(&payload.base); + const name = mod.error_name_list.items[@intCast(usize, int)]; + return (try mod.intern(.{ .err = .{ + .ty = ty.ip_index, + .name = mod.intern_pool.getString(name).unwrap().?, + } })).toValue(); }, .Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. @@ -1383,7 +1014,7 @@ pub const Value = struct { } /// Asserts that the value is a float or an integer. - pub fn toFloat(val: Value, comptime T: type, mod: *const Module) T { + pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { return switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), @@ -1393,6 +1024,8 @@ pub const Value = struct { } return @intToFloat(T, x); }, + .lazy_align => |ty| @intToFloat(T, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @intToFloat(T, ty.toType().abiSize(mod)), }, .float => |float| switch (float.storage) { inline else => |x| @floatCast(T, x), @@ -1421,89 +1054,24 @@ pub const Value = struct { } pub fn clz(val: Value, ty: Type, mod: *Module) u64 { - const ty_bits = ty.intInfo(mod).bits; - return switch (val.ip_index) { - .bool_false => ty_bits, - .bool_true => ty_bits - 1, - .none => switch (val.tag()) { - .the_only_possible_value => { - assert(ty_bits == 0); - return ty_bits; - }, - - .lazy_align, .lazy_size => { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; - return bigint.clz(ty_bits); - }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.clz(ty_bits), - .u64 => |x| @clz(x) + ty_bits - 64, - .i64 => @panic("TODO implement i64 Value clz"), - }, - else => unreachable, - }, - }; + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return bigint.clz(ty.intInfo(mod).bits); } - pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { - const ty_bits = ty.intInfo(mod).bits; - return switch (val.ip_index) { - .bool_false => ty_bits, - .bool_true => 0, - .none => switch (val.tag()) { - .the_only_possible_value => { - assert(ty_bits == 0); - return ty_bits; - }, - - .lazy_align, .lazy_size => { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; - return bigint.ctz(); - }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.ctz(), - .u64 => |x| { - const big = @ctz(x); - return if (big == 64) ty_bits else big; - }, - .i64 => @panic("TODO implement i64 Value ctz"), - }, - else => unreachable, - }, - }; + pub fn ctz(val: Value, _: Type, mod: *Module) u64 { + var bigint_buf: 
BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return bigint.ctz(); } pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { - assert(!val.isUndef(mod)); - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, - .none => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| { - const info = ty.intInfo(mod); - var buffer: Value.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return @intCast(u64, big_int.popCount(info.bits)); - }, - else => unreachable, - }, - } + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return @intCast(u64, bigint.popCount(ty.intInfo(mod).bits)); } pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef(mod)); - const info = ty.intInfo(mod); var buffer: Value.BigIntSpace = undefined; @@ -1520,8 +1088,6 @@ pub const Value = struct { } pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef(mod)); - const info = ty.intInfo(mod); // Bit count must be evenly divisible by 8 @@ -1543,41 +1109,9 @@ pub const Value = struct { /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. pub fn intBitCountTwosComp(self: Value, mod: *Module) usize { - const target = mod.getTarget(); - return switch (self.ip_index) { - .bool_false => 0, - .bool_true => 1, - .none => switch (self.tag()) { - .the_only_possible_value => 0, - - .decl_ref_mut, - .comptime_field_ptr, - .extern_fn, - .decl_ref, - .function, - .variable, - .eu_payload_ptr, - .opt_payload_ptr, - => target.ptrBitWidth(), - - else => { - var buffer: BigIntSpace = undefined; - return self.toBigInt(&buffer, mod).bitCountTwosComp(); - }, - }, - else => switch (mod.intern_pool.indexToKey(self.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.bitCountTwosComp(), - .u64 => |x| if (x == 0) 0 else @intCast(usize, std.math.log2(x) + 1), - .i64 => { - var buffer: Value.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return big_int.bitCountTwosComp(); - }, - }, - else => unreachable, - }, - }; + var buffer: BigIntSpace = undefined; + const big_int = self.toBigInt(&buffer, mod); + return big_int.bitCountTwosComp(); } /// Converts an integer or a float to a float. May result in a loss of information. @@ -1616,84 +1150,39 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { - switch (lhs.ip_index) { - .bool_false => return .eq, - .bool_true => return .gt, - .none => return switch (lhs.tag()) { - .the_only_possible_value => .eq, - - .decl_ref, - .decl_ref_mut, - .comptime_field_ptr, - .extern_fn, - .function, - .variable, - => .gt, - - .runtime_value => { - // This is needed to correctly handle hashing the value. - // Checks in Sema should prevent direct comparisons from reaching here. 
- const val = lhs.castTag(.runtime_value).?.data; - return val.orderAgainstZeroAdvanced(mod, opt_sema); - }, - - .lazy_align => { - const ty = lhs.castTag(.lazy_align).?.data; - const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) { - return .gt; - } else { - return .eq; - } - }, - .lazy_size => { - const ty = lhs.castTag(.lazy_size).?.data; - const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) { - return .gt; - } else { - return .eq; - } - }, - - .elem_ptr => { - const elem_ptr = lhs.castTag(.elem_ptr).?.data; - switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) { + return switch (lhs.ip_index) { + .bool_false => .eq, + .bool_true => .gt, + else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => .gt, + .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), + .elem => |elem| switch (try elem.base.toValue().orderAgainstZeroAdvanced(mod, opt_sema)) { .lt => unreachable, - .gt => return .gt, - .eq => { - if (elem_ptr.index == 0) { - return .eq; - } else { - return .gt; - } - }, - } + .gt => .gt, + .eq => if (elem.index == 0) .eq else .gt, + }, + else => unreachable, }, - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(lhs.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.orderAgainstScalar(0), inline .u64, .i64 => |x| std.math.order(x, 0), + .lazy_align, .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced( + mod, + false, + if (opt_sema) |sema| .{ .sema = sema } else .eager, + ) catch |err| switch (err) { + error.NeedLazy => unreachable, + else => |e| return e, + }) .gt else .eq, }, - .enum_tag => |enum_tag| switch (mod.intern_pool.indexToKey(enum_tag.int).int.storage) { - .big_int => |big_int| big_int.orderAgainstScalar(0), - inline .u64, .i64 => |x| std.math.order(x, 0), - }, + .enum_tag => |enum_tag| enum_tag.int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, else => unreachable, }, - } + }; } /// Asserts the value is comparable. 
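The rewritten `orderAgainstZeroAdvanced` above shows the general shape of value inspection after this change: instead of testing a legacy payload tag, code switches on `mod.intern_pool.indexToKey(...)` and recurses into nested values via `toValue()`. A minimal sketch of that traversal style under the same `Key.Ptr.addr` variants (the function name is hypothetical):

    // Hypothetical sketch: order a comptime-known pointer's base address
    // against zero, in the Key-switching style used above.
    fn ptrOrderAgainstZero(mod: *Module, val: Value) Module.CompileError!std.math.Order {
        return switch (mod.intern_pool.indexToKey(val.ip_index)) {
            .ptr => |ptr| switch (ptr.addr) {
                // Decls and comptime fields always live at nonzero addresses.
                .decl, .mut_decl, .comptime_field => .gt,
                // Integer-valued pointers compare by their integer value.
                .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, null),
                else => unreachable,
            },
            else => unreachable,
        };
    }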
@@ -1760,8 +1249,8 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) !bool { - if (lhs.pointerDecl()) |lhs_decl| { - if (rhs.pointerDecl()) |rhs_decl| { + if (lhs.pointerDecl(mod)) |lhs_decl| { + if (rhs.pointerDecl(mod)) |rhs_decl| { switch (op) { .eq => return lhs_decl == rhs_decl, .neq => return lhs_decl != rhs_decl, @@ -1774,7 +1263,7 @@ pub const Value = struct { else => {}, } } - } else if (rhs.pointerDecl()) |_| { + } else if (rhs.pointerDecl(mod)) |_| { switch (op) { .eq => return false, .neq => return true, @@ -1849,7 +1338,6 @@ pub const Value = struct { switch (lhs.ip_index) { .none => switch (lhs.tag()) { - .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema), .aggregate => { for (lhs.castTag(.aggregate).?.data) |elem_val| { if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false; @@ -1877,6 +1365,15 @@ pub const Value = struct { .float => |float| switch (float.storage) { inline else => |x| if (std.math.isNan(x)) return op == .neq, }, + .aggregate => |aggregate| return switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| { + if (!std.math.order(byte, 0).compare(op)) break false; + } else true, + .elems => |elems| for (elems) |elem| { + if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + } else true, + .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), + }, else => {}, }, } @@ -1910,69 +1407,6 @@ pub const Value = struct { const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) switch (a_tag) { - .the_only_possible_value => return true, - .enum_literal => { - const a_name = a.castTag(.enum_literal).?.data; - const b_name = b.castTag(.enum_literal).?.data; - return std.mem.eql(u8, a_name, b_name); - }, - .opt_payload => { - const a_payload = a.castTag(.opt_payload).?.data; - const b_payload = b.castTag(.opt_payload).?.data; - const payload_ty = ty.optionalChild(mod); - return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); - }, - .slice => { - const a_payload = a.castTag(.slice).?.data; - const b_payload = b.castTag(.slice).?.data; - if (!(try eqlAdvanced(a_payload.len, Type.usize, b_payload.len, Type.usize, mod, opt_sema))) { - return false; - } - - const ptr_ty = ty.slicePtrFieldType(mod); - - return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema); - }, - .elem_ptr => { - const a_payload = a.castTag(.elem_ptr).?.data; - const b_payload = b.castTag(.elem_ptr).?.data; - if (a_payload.index != b_payload.index) return false; - - return eqlAdvanced(a_payload.array_ptr, ty, b_payload.array_ptr, ty, mod, opt_sema); - }, - .field_ptr => { - const a_payload = a.castTag(.field_ptr).?.data; - const b_payload = b.castTag(.field_ptr).?.data; - if (a_payload.field_index != b_payload.field_index) return false; - - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .@"error" => { - const a_name = a.castTag(.@"error").?.data.name; - const b_name = b.castTag(.@"error").?.data.name; - return std.mem.eql(u8, a_name, b_name); - }, - .eu_payload => { - const a_payload = a.castTag(.eu_payload).?.data; - const b_payload = b.castTag(.eu_payload).?.data; - const payload_ty = ty.errorUnionPayload(mod); - return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); - }, - .eu_payload_ptr => { - const a_payload = a.castTag(.eu_payload_ptr).?.data; - const b_payload = 
b.castTag(.eu_payload_ptr).?.data; - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .opt_payload_ptr => { - const a_payload = a.castTag(.opt_payload_ptr).?.data; - const b_payload = b.castTag(.opt_payload_ptr).?.data; - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .function => { - const a_payload = a.castTag(.function).?.data; - const b_payload = b.castTag(.function).?.data; - return a_payload == b_payload; - }, .aggregate => { const a_field_vals = a.castTag(.aggregate).?.data; const b_field_vals = b.castTag(.aggregate).?.data; @@ -2035,17 +1469,15 @@ pub const Value = struct { return eqlAdvanced(a_union.val, active_field_ty, b_union.val, active_field_ty, mod, opt_sema); }, else => {}, - } else if (b_tag == .@"error") { - return false; - } + }; - if (a.pointerDecl()) |a_decl| { - if (b.pointerDecl()) |b_decl| { + if (a.pointerDecl(mod)) |a_decl| { + if (b.pointerDecl(mod)) |b_decl| { return a_decl == b_decl; } else { return false; } - } else if (b.pointerDecl()) |_| { + } else if (b.pointerDecl(mod)) |_| { return false; } @@ -2130,25 +1562,11 @@ pub const Value = struct { if (a_nan) return true; return a_float == b_float; }, - .Optional => if (b_tag == .opt_payload) { - var sub_pl: Payload.SubValue = .{ - .base = .{ .tag = b.tag() }, - .data = a, - }; - const sub_val = Value.initPayload(&sub_pl.base); - return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema); - }, - .ErrorUnion => if (a_tag != .@"error" and b_tag == .eu_payload) { - var sub_pl: Payload.SubValue = .{ - .base = .{ .tag = b.tag() }, - .data = a, - }; - const sub_val = Value.initPayload(&sub_pl.base); - return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema); - }, + .Optional, + .ErrorUnion, + => unreachable, // handled by InternPool else => {}, } - if (a_tag == .@"error") return false; return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq); } @@ -2166,7 +1584,7 @@ pub const Value = struct { std.hash.autoHash(hasher, zig_ty_tag); if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. - if (val.isRuntimeValue()) return; + if (val.isRuntimeValue(mod)) return; switch (zig_ty_tag) { .Opaque => unreachable, // Cannot hash opaque types @@ -2177,38 +1595,20 @@ pub const Value = struct { .Null, => {}, - .Type => unreachable, // handled via ip_index check above - .Float => { - // For hash/eql purposes, we treat floats as their IEEE integer representation. 
- switch (ty.floatBits(mod.getTarget())) { - 16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16, mod))), - 32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32, mod))), - 64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64, mod))), - 80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80, mod))), - 128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), - else => unreachable, - } - }, - .ComptimeFloat => { - const float = val.toFloat(f128, mod); - const is_nan = std.math.isNan(float); - std.hash.autoHash(hasher, is_nan); - if (!is_nan) { - std.hash.autoHash(hasher, @bitCast(u128, float)); - } else { - std.hash.autoHash(hasher, std.math.signbit(float)); - } - }, - .Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) { - .slice => { - const slice = val.castTag(.slice).?.data; - const ptr_ty = ty.slicePtrFieldType(mod); - hash(slice.ptr, ptr_ty, hasher, mod); - hash(slice.len, Type.usize, hasher, mod); - }, - - else => return hashPtr(val, hasher, mod), - }, + .Type, + .Float, + .ComptimeFloat, + .Bool, + .Int, + .ComptimeInt, + .Pointer, + .Optional, + .ErrorUnion, + .ErrorSet, + .Enum, + .EnumLiteral, + .Fn, + => unreachable, // handled via ip_index check above .Array, .Vector => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); @@ -2233,42 +1633,6 @@ pub const Value = struct { else => unreachable, } }, - .Optional => { - if (val.castTag(.opt_payload)) |payload| { - std.hash.autoHash(hasher, true); // non-null - const sub_val = payload.data; - const sub_ty = ty.optionalChild(mod); - sub_val.hash(sub_ty, hasher, mod); - } else { - std.hash.autoHash(hasher, false); // null - } - }, - .ErrorUnion => { - if (val.tag() == .@"error") { - std.hash.autoHash(hasher, false); // error - const sub_ty = ty.errorUnionSet(mod); - val.hash(sub_ty, hasher, mod); - return; - } - - if (val.castTag(.eu_payload)) |payload| { - std.hash.autoHash(hasher, true); // payload - const sub_ty = ty.errorUnionPayload(mod); - payload.data.hash(sub_ty, hasher, mod); - return; - } else unreachable; - }, - .ErrorSet => { - // just hash the literal error value. this is the most stable - // thing between compiler invocations. we can't use the error - // int cause (1) its not stable and (2) we don't have access to mod. - hasher.update(val.getError().?); - }, - .Enum => { - // This panic will go away when enum values move to be stored in the intern pool. - const int_val = val.enumToInt(ty, mod) catch @panic("OOM"); - hashInt(int_val, hasher, mod); - }, .Union => { const union_obj = val.cast(Payload.Union).?.data; if (ty.unionTagType(mod)) |tag_ty| { @@ -2277,27 +1641,12 @@ pub const Value = struct { const active_field_ty = ty.unionFieldType(union_obj.tag, mod); union_obj.val.hash(active_field_ty, hasher, mod); }, - .Fn => { - // Note that this hashes the *Fn/*ExternFn rather than the *Decl. - // This is to differentiate function bodies from function pointers. - // This is currently redundant since we already hash the zig type tag - // at the top of this function. 
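// [editor's sketch, not part of the patch] The deleted float cases above relied
// on hashing a float's IEEE bit pattern so that hash() stays consistent with
// eql(); after this change that work happens inside the InternPool instead. A
// minimal standalone illustration of the bit-pattern idea, using this era's
// two-argument @bitCast; the helper name is hypothetical:
const std = @import("std");

fn hashF64Bits(hasher: *std.hash.Wyhash, x: f64) void {
    // NaNs with different payloads hash differently here; note the deleted
    // ComptimeFloat branch handled NaN separately for exactly that reason.
    std.hash.autoHash(hasher, @bitCast(u64, x));
}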
- if (val.castTag(.function)) |func| { - std.hash.autoHash(hasher, func.data); - } else if (val.castTag(.extern_fn)) |func| { - std.hash.autoHash(hasher, func.data); - } else unreachable; - }, .Frame => { @panic("TODO implement hashing frame values"); }, .AnyFrame => { @panic("TODO implement hashing anyframe values"); }, - .EnumLiteral => { - const bytes = val.castTag(.enum_literal).?.data; - hasher.update(bytes); - }, } } @@ -2308,7 +1657,7 @@ pub const Value = struct { pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. - if (val.isRuntimeValue()) return; + if (val.isRuntimeValue(mod)) return; if (val.ip_index != .none) { // The InternPool data structure hashes based on Key to make interned objects @@ -2326,16 +1675,20 @@ pub const Value = struct { .Null, .Struct, // It sure would be nice to do something clever with structs. => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag), - .Type => unreachable, // handled above with the ip_index check - .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), - .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) { - .slice => { - const slice = val.castTag(.slice).?.data; - const ptr_ty = ty.slicePtrFieldType(mod); - slice.ptr.hashUncoerced(ptr_ty, hasher, mod); - }, - else => val.hashPtr(hasher, mod), - }, + .Type, + .Float, + .ComptimeFloat, + .Bool, + .Int, + .ComptimeInt, + .Pointer, + .Fn, + .Optional, + .ErrorSet, + .ErrorUnion, + .Enum, + .EnumLiteral, + => unreachable, // handled above with the ip_index check .Array, .Vector => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); @@ -2348,21 +1701,16 @@ pub const Value = struct { elem_val.hashUncoerced(elem_ty, hasher, mod); } }, - .Optional => if (val.castTag(.opt_payload)) |payload| { - const child_ty = ty.optionalChild(mod); - payload.data.hashUncoerced(child_ty, hasher, mod); - } else std.hash.autoHash(hasher, std.builtin.TypeId.Null), - .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else { - const pl_ty = ty.errorUnionPayload(mod); - val.castTag(.eu_payload).?.data.hashUncoerced(pl_ty, hasher, mod); - }, - .Enum, .EnumLiteral, .Union => { - hasher.update(val.tagName(ty, mod)); - if (val.cast(Payload.Union)) |union_obj| { - const active_field_ty = ty.unionFieldType(union_obj.data.tag, mod); - union_obj.data.val.hashUncoerced(active_field_ty, hasher, mod); - } else std.hash.autoHash(hasher, std.builtin.TypeId.Void); - }, + .Union => { + hasher.update(val.tagName(mod)); + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .un => |un| { + const active_field_ty = ty.unionFieldType(un.tag.toValue(), mod); + un.val.toValue().hashUncoerced(active_field_ty, hasher, mod); + }, + else => std.hash.autoHash(hasher, std.builtin.TypeId.Void), + } + }, .Frame => @panic("TODO implement hashing frame values"), .AnyFrame => @panic("TODO implement hashing anyframe values"), } @@ -2397,57 +1745,53 @@ pub const Value = struct { } }; - pub fn isComptimeMutablePtr(val: Value) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .decl_ref_mut, .comptime_field_ptr => true, - .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr), - .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr), - .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr), - .opt_payload_ptr => 
isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr), - .slice => isComptimeMutablePtr(val.castTag(.slice).?.data.ptr), - + pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .mut_decl, .comptime_field => true, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod), + .elem, .field => |base_index| base_index.base.toValue().isComptimeMutablePtr(mod), else => false, }, else => false, }; } - pub fn canMutateComptimeVarState(val: Value) bool { - if (val.isComptimeMutablePtr()) return true; - return switch (val.ip_index) { - .none => switch (val.tag()) { - .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(), - .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), - .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(), - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), - .aggregate => { - const fields = val.castTag(.aggregate).?.data; - for (fields) |field| { - if (field.canMutateComptimeVarState()) return true; - } - return false; + pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool { + return val.isComptimeMutablePtr(mod) or switch (val.ip_index) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => false, + .payload => |payload| payload.toValue().canMutateComptimeVarState(mod), }, - .@"union" => return val.cast(Payload.Union).?.data.val.canMutateComptimeVarState(), - .slice => return val.castTag(.slice).?.data.ptr.canMutateComptimeVarState(), - else => return false, + .ptr => |ptr| switch (ptr.addr) { + .eu_payload, .opt_payload => |base| base.toValue().canMutateComptimeVarState(mod), + else => false, + }, + .opt => |opt| switch (opt.val) { + .none => false, + else => opt.val.toValue().canMutateComptimeVarState(mod), + }, + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { + if (elem.toValue().canMutateComptimeVarState(mod)) break true; + } else false, + .un => |un| un.val.toValue().canMutateComptimeVarState(mod), + else => false, }, - else => return false, }; } /// Gets the decl referenced by this pointer. If the pointer does not point /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), /// this function returns null. 
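// [editor's sketch, not part of the patch] pointerDecl now takes a *Module so
// it can decode the pointer through mod.intern_pool. A hypothetical caller,
// showing that only whole-decl pointers (decl / mut_decl, plus function and
// variable values) resolve to a Decl.Index, while field and element pointers
// yield null as the doc comment above describes:
fn declBehindPtr(mod: *Module, ptr_val: Value) ?*Module.Decl {
    const decl_index = ptr_val.pointerDecl(mod) orelse return null;
    return mod.declPtr(decl_index);
}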
- pub fn pointerDecl(val: Value) ?Module.Decl.Index { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index, - .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, - .function => val.castTag(.function).?.data.owner_decl, - .variable => val.castTag(.variable).?.data.owner_decl, - .decl_ref => val.cast(Payload.Decl).?.data, + pub fn pointerDecl(val: Value, mod: *Module) ?Module.Decl.Index { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable.decl, + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => null, }, else => null, @@ -2463,95 +1807,15 @@ pub const Value = struct { } } - fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void { - switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - .extern_fn, - .function, - .variable, - => { - const decl: Module.Decl.Index = ptr_val.pointerDecl().?; - std.hash.autoHash(hasher, decl); - }, - .comptime_field_ptr => { - std.hash.autoHash(hasher, Value.Tag.comptime_field_ptr); - }, - - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - hashPtr(elem_ptr.array_ptr, hasher, mod); - std.hash.autoHash(hasher, Value.Tag.elem_ptr); - std.hash.autoHash(hasher, elem_ptr.index); - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.field_ptr); - hashPtr(field_ptr.container_ptr, hasher, mod); - std.hash.autoHash(hasher, field_ptr.field_index); - }, - .eu_payload_ptr => { - const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr); - hashPtr(err_union_ptr.container_ptr, hasher, mod); - }, - .opt_payload_ptr => { - const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr); - hashPtr(opt_ptr.container_ptr, hasher, mod); - }, - - .the_only_possible_value, - .lazy_align, - .lazy_size, - => return hashInt(ptr_val, hasher, mod), - - else => unreachable, - } - } + pub const slice_ptr_index = 0; + pub const slice_len_index = 1; pub fn slicePtr(val: Value, mod: *Module) Value { - if (val.ip_index != .none) return mod.intern_pool.slicePtr(val.ip_index).toValue(); - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr, - // TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc. 
- .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr, .comptime_field_ptr => val, - else => unreachable, - }; + return mod.intern_pool.slicePtr(val.ip_index).toValue(); } pub fn sliceLen(val: Value, mod: *Module) u64 { - if (val.ip_index != .none) return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod), - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(mod); - } else { - return 1; - } - }, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(mod); - } else { - return 1; - } - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (payload.field_ty.zigTypeTag(mod) == .Array) { - return payload.field_ty.arrayLen(mod); - } else { - return 1; - } - }, - else => unreachable, - }; + return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); } /// Asserts the value is a single-item pointer to an array, or an array, @@ -2560,14 +1824,6 @@ pub const Value = struct { switch (val.ip_index) { .undef => return Value.undef, .none => switch (val.tag()) { - // This is the case of accessing an element of an undef array. - .empty_array => unreachable, // out of bounds array index - - .empty_array_sentinel => { - assert(index == 0); // The only valid index for an empty array with sentinel. - return val.castTag(.empty_array_sentinel).?.data; - }, - .bytes => { const byte = val.castTag(.bytes).?.data[index]; return mod.intValue(Type.u8, byte); @@ -2579,128 +1835,101 @@ pub const Value = struct { return mod.intValue(Type.u8, byte); }, - // No matter the index; all the elements are the same! - .repeated => return val.castTag(.repeated).?.data, - .aggregate => return val.castTag(.aggregate).?.data[index], - .slice => return val.castTag(.slice).?.data.ptr.elemValue(mod, index), - - .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValue(mod, index), - .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValue(mod, index), - .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValue(mod, index), - .elem_ptr => { - const data = val.castTag(.elem_ptr).?.data; - return data.array_ptr.elemValue(mod, index + data.index); - }, - .field_ptr => { - const data = val.castTag(.field_ptr).?.data; - if (data.container_ptr.pointerDecl()) |decl_index| { - const container_decl = mod.declPtr(decl_index); - const field_type = data.container_ty.structFieldType(data.field_index, mod); - const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index); - return field_val.elemValue(mod, index); - } else unreachable; - }, - - // The child type of arrays which have only one possible value need - // to have only one possible value itself. 
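// [editor's sketch, not part of the patch] With slices fully interned, the
// slicePtr / sliceLen rewrite above becomes a straight InternPool lookup and
// none of the deleted decl_ref special cases survive. A hypothetical helper
// built on the new API:
fn sliceBounds(mod: *Module, slice: Value) struct { ptr: Value, len: u64 } {
    return .{
        .ptr = slice.slicePtr(mod),
        .len = slice.sliceLen(mod), // asserts the value is an interned slice
    };
}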
- .the_only_possible_value => return val, - - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValue(mod, index), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValue(mod, index), - - .opt_payload => return val.castTag(.opt_payload).?.data.elemValue(mod, index), - .eu_payload => return val.castTag(.eu_payload).?.data.elemValue(mod, index), else => unreachable, }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .ptr => |ptr| switch (ptr.addr) { - .@"var" => unreachable, .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), .int, .eu_payload, .opt_payload => unreachable, .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), - .field => unreachable, - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elems| elems[index].toValue(), - .repeated_elem => |elem| elem.toValue(), + .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| { + const base_decl = mod.declPtr(decl_index); + const field_val = try base_decl.val.fieldValue(mod, field.index); + return field_val.elemValue(mod, index); + } else unreachable, + }, + .aggregate => |aggregate| { + const len = mod.intern_pool.aggregateTypeLen(aggregate.ty); + if (index < len) return switch (aggregate.storage) { + .bytes => |bytes| try mod.intern(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }.toValue(); + assert(index == len); + return mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel.toValue(); }, else => unreachable, }, } } - pub fn isLazyAlign(val: Value) bool { - return val.ip_index == .none and val.tag() == .lazy_align; - } - - pub fn isLazySize(val: Value) bool { - return val.ip_index == .none and val.tag() == .lazy_size; + pub fn isLazyAlign(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| int.storage == .lazy_align, + else => false, + }; } - pub fn isRuntimeValue(val: Value) bool { - return val.ip_index == .none and val.tag() == .runtime_value; + pub fn isLazySize(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| int.storage == .lazy_size, + else => false, + }; } - pub fn tagIsVariable(val: Value) bool { - return val.ip_index == .none and val.tag() == .variable; + pub fn isRuntimeValue(val: Value, mod: *Module) bool { + return mod.intern_pool.indexToKey(val.ip_index) == .runtime_value; } /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod), - .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod), - .decl_ref => { - const decl = mod.declPtr(val.castTag(.decl_ref).?.data); + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => true, + .ptr => |ptr| switch (ptr.addr) { 
+ .decl => |decl_index| { + const decl = mod.declPtr(decl_index); assert(decl.has_tv); return decl.val.isVariable(mod); }, - .decl_ref_mut => { - const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index); + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); assert(decl.has_tv); return decl.val.isVariable(mod); }, - - .variable => true, - else => false, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isVariable(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isVariable(mod), + .elem, .field => |base_index| base_index.base.toValue().isVariable(mod), }, else => false, }; } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .variable => false, - else => val.isPtrToThreadLocalInner(mod), - }, - else => val.isPtrToThreadLocalInner(mod), - }; - } - - fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isPtrToThreadLocalInner(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isPtrToThreadLocalInner(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isPtrToThreadLocalInner(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .decl_ref => mod.declPtr(val.castTag(.decl_ref).?.data).val.isPtrToThreadLocalInner(mod), - .decl_ref_mut => mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.isPtrToThreadLocalInner(mod), - - .variable => val.castTag(.variable).?.data.is_threadlocal, - else => false, + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable.is_threadlocal, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), + .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), }, else => false, }; @@ -2714,39 +1943,42 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { - return switch (val.tag()) { - .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array), - .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - return Tag.str_lit.create(arena, .{ - .index = @intCast(u32, str_lit.index + start), - .len = @intCast(u32, end - start), - }); + return switch (val.ip_index) { + .none => switch (val.tag()) { + .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), + .str_lit => { + const str_lit = val.castTag(.str_lit).?.data; + return Tag.str_lit.create(arena, .{ + .index = @intCast(u32, str_lit.index + start), + .len = @intCast(u32, end - start), + }); + }, + else => unreachable, }, - .aggregate => Tag.aggregate.create(arena, 
val.castTag(.aggregate).?.data[start..end]), - .slice => sliceArray(val.castTag(.slice).?.data.ptr, mod, arena, start, end), - - .decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end), - .decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end), - .comptime_field_ptr => sliceArray(val.castTag(.comptime_field_ptr).?.data.field_val, mod, arena, start, end), - .elem_ptr => blk: { - const elem_ptr = val.castTag(.elem_ptr).?.data; - break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), + .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), + else => unreachable, + }, + .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ + .ty = mod.intern_pool.typeOf(val.ip_index), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[start..end] }, + .elems => |elems| .{ .elems = elems[start..end] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, + } })).toValue(), + else => unreachable, }, - - .repeated, - .the_only_possible_value, - => val, - - else => unreachable, }; } - pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value { + pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { switch (val.ip_index) { .undef => return Value.undef, - .none => switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -2757,13 +1989,14 @@ pub const Value = struct { // TODO assert the tag is correct return payload.val; }, - - .the_only_possible_value => return (try ty.onePossibleValue(mod)).?, - else => unreachable, }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try mod.intern(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[index] }, + } }), .elems => |elems| elems[index], .repeated_elem => |elem| elem, }.toValue(), @@ -2785,40 +2018,37 @@ pub const Value = struct { pub fn elemPtr( val: Value, ty: Type, - arena: Allocator, index: usize, mod: *Module, ) Allocator.Error!Value { const elem_ty = ty.elemType2(mod); - const ptr_val = switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr, - else => val, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .ptr => |ptr| switch (ptr.len) { + const ptr_val = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| ptr: { + switch (ptr.addr) { + .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) + return (try mod.intern(.{ .ptr = .{ + .ty = ty.ip_index, + .addr = .{ .elem = .{ + .base = elem.base, + .index = elem.index + index, + } }, + } })).toValue(), + else => {}, + } + break :ptr switch (ptr.len) { .none => val, else => val.slicePtr(mod), - }, - else => val, + }; }, + else => val, }; - - if (ptr_val.ip_index == .none and ptr_val.tag() == .elem_ptr) { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - if (elem_ptr.elem_ty.eql(elem_ty, mod)) { - return Tag.elem_ptr.create(arena, 
.{ - .array_ptr = elem_ptr.array_ptr, - .elem_ty = elem_ptr.elem_ty, - .index = elem_ptr.index + index, - }); - } - } - return Tag.elem_ptr.create(arena, .{ - .array_ptr = ptr_val, - .elem_ty = elem_ty, - .index = index, - }); + return (try mod.intern(.{ .ptr = .{ + .ty = ty.ip_index, + .addr = .{ .elem = .{ + .base = ptr_val.ip_index, + .index = index, + } }, + } })).toValue(); } pub fn isUndef(val: Value, mod: *Module) bool { @@ -2840,69 +2070,44 @@ pub const Value = struct { /// Returns true if any value contained in `self` is undefined. pub fn anyUndef(val: Value, mod: *Module) !bool { if (val.ip_index == .none) return false; - switch (val.ip_index) { - .undef => return true, + return switch (val.ip_index) { + .undef => true, .none => switch (val.tag()) { - .slice => { - const payload = val.castTag(.slice).?; - const len = payload.data.len.toUnsignedInt(mod); - - for (0..len) |i| { - const elem_val = try payload.data.ptr.elemValue(mod, i); - if (try elem_val.anyUndef(mod)) return true; - } - }, - - .aggregate => { - const payload = val.castTag(.aggregate).?; - for (payload.data) |field| { - if (try field.anyUndef(mod)) return true; - } - }, - else => {}, + .aggregate => for (val.castTag(.aggregate).?.data) |field| { + if (try field.anyUndef(mod)) break true; + } else false, + else => false, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .undef => return true, - .simple_value => |v| if (v == .undefined) return true, - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elems| for (elems) |elem| { - if (try anyUndef(elem.toValue(), mod)) return true; - }, - .repeated_elem => |elem| if (try anyUndef(elem.toValue(), mod)) return true, - }, - else => {}, + .undef => true, + .simple_value => |v| v == .undefined, + .ptr => |ptr| switch (ptr.len) { + .none => false, + else => for (0..@intCast(usize, ptr.len.toValue().toUnsignedInt(mod))) |index| { + if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true; + } else false, + }, + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { + if (try anyUndef(elem.toValue(), mod)) break true; + } else false, + else => false, }, - } - - return false; + }; } /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. - pub fn isNull(val: Value, mod: *const Module) bool { + pub fn isNull(val: Value, mod: *Module) bool { return switch (val.ip_index) { .undef => unreachable, .unreachable_value => unreachable, .null_value => true, - .none => switch (val.tag()) { - .opt_payload => false, - - // If it's not one of those two tags then it must be a C pointer value, - // in which case the value 0 is null and other values are non-null. - - .the_only_possible_value => true, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - - else => false, - }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.eqZero(), - inline .u64, .i64 => |x| x == 0, + .int => { + var buf: BigIntSpace = undefined; + return val.toBigInt(&buf, mod).eqZero(); }, .opt => |opt| opt.val == .none, else => false, @@ -2914,53 +2119,28 @@ pub const Value = struct { /// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether /// something is an error or not because it works without having to figure out the /// string. 
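// [editor's sketch, not part of the patch] As the doc comment above says,
// errorUnionIsPayload answers "is this an error?" without materializing the
// name string. A hypothetical use of the two Module-threaded signatures
// (assumes the usual `std` import and this file's Value/Module types):
fn describeErrorUnion(mod: *const Module, val: Value) void {
    if (val.errorUnionIsPayload(mod)) {
        std.log.info("payload, no error", .{});
    } else {
        // Only reach for the name when a message is actually needed.
        std.log.info("error.{s}", .{val.getError(mod).?});
    }
}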
- pub fn getError(self: Value) ?[]const u8 { - return switch (self.ip_index) { - .undef => unreachable, - .unreachable_value => unreachable, - .none => switch (self.tag()) { - .@"error" => self.castTag(.@"error").?.data.name, - .eu_payload => null, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - else => unreachable, + pub fn getError(self: Value, mod: *const Module) ?[]const u8 { + return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.ip_index)) { + .err => |err| err.name.toOptional(), + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| err_name.toOptional(), + .payload => .none, }, else => unreachable, - }; + }); } /// Assumes the type is an error union. Returns true if and only if the value is /// the error union payload, not an error. - pub fn errorUnionIsPayload(val: Value) bool { - return switch (val.ip_index) { - .undef => unreachable, - .none => switch (val.tag()) { - .eu_payload => true, - else => false, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - }, - else => false, - }; + pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool { + return mod.intern_pool.indexToKey(val.ip_index).error_union.val == .payload; } /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (val.ip_index) { - .none => if (val.isNull(mod)) null - // Valid for optional representation to be the direct value - // and not use opt_payload. - else if (val.castTag(.opt_payload)) |p| p.data else val, - .null_value => null, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .opt => |opt| switch (opt.val) { - .none => null, - else => opt.val.toValue(), - }, - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(val.ip_index).opt.val) { + .none => null, + else => |index| index.toValue(), }; } @@ -3001,28 +2181,8 @@ pub const Value = struct { } pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - switch (val.ip_index) { - .undef => return val, - .none => switch (val.tag()) { - .the_only_possible_value => return mod.floatValue(float_ty, 0), // for i0, u0 - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.abiAlignment(mod), float_ty, mod); - } - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.abiSize(mod), float_ty, mod); - } - }, - else => unreachable, - }, + return switch (val.ip_index) { + .undef => val, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| { @@ -3030,10 +2190,20 @@ pub const Value = struct { return mod.floatValue(float_ty, float); }, inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod), + .lazy_align => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } else { + return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod); + }, + .lazy_size => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } 
else { + return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod); + }, }, else => unreachable, }, - } + }; } fn intToFloatInner(x: anytype, dest_ty: Type, mod: *Module) !Value { @@ -4768,81 +3938,6 @@ pub const Value = struct { pub const Payload = struct { tag: Tag, - pub const Function = struct { - base: Payload, - data: *Module.Fn, - }; - - pub const ExternFn = struct { - base: Payload, - data: *Module.ExternFn, - }; - - pub const Decl = struct { - base: Payload, - data: Module.Decl.Index, - }; - - pub const Variable = struct { - base: Payload, - data: *Module.Var, - }; - - pub const SubValue = struct { - base: Payload, - data: Value, - }; - - pub const DeclRefMut = struct { - pub const base_tag = Tag.decl_ref_mut; - - base: Payload = Payload{ .tag = base_tag }, - data: Data, - - pub const Data = struct { - decl_index: Module.Decl.Index, - runtime_index: RuntimeIndex, - }; - }; - - pub const PayloadPtr = struct { - base: Payload, - data: struct { - container_ptr: Value, - container_ty: Type, - }, - }; - - pub const ComptimeFieldPtr = struct { - base: Payload, - data: struct { - field_val: Value, - field_ty: Type, - }, - }; - - pub const ElemPtr = struct { - pub const base_tag = Tag.elem_ptr; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - array_ptr: Value, - elem_ty: Type, - index: usize, - }, - }; - - pub const FieldPtr = struct { - pub const base_tag = Tag.field_ptr; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - container_ptr: Value, - container_ty: Type, - field_index: usize, - }, - }; - pub const Bytes = struct { base: Payload, /// Includes the sentinel, if any. @@ -4861,32 +3956,6 @@ pub const Value = struct { data: []Value, }; - pub const Slice = struct { - base: Payload, - data: struct { - ptr: Value, - len: Value, - }, - - pub const ptr_index = 0; - pub const len_index = 1; - }; - - pub const Ty = struct { - base: Payload, - data: Type, - }; - - pub const Error = struct { - base: Payload = .{ .tag = .@"error" }, - data: struct { - /// `name` is owned by `Module` and will be valid for the entire - /// duration of the compilation. 
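// [editor's sketch, not part of the patch] In intToFloatScalar above,
// lazy_align and lazy_size are now storage variants of an interned int rather
// than Value tags, so resolving them is a switch on int.storage. A
// hypothetical resolver for the non-Sema path:
fn lazyIntToU64(mod: *Module, val: Value) ?u64 {
    return switch (mod.intern_pool.indexToKey(val.ip_index).int.storage) {
        .u64 => |x| x,
        .lazy_align => |ty| ty.toType().abiAlignment(mod),
        .lazy_size => |ty| ty.toType().abiSize(mod),
        else => null, // i64 / big_int cases omitted in this sketch
    };
}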
- /// TODO revisit this when we have the concept of the error tag type - name: []const u8, - }, - }; - pub const InferredAlloc = struct { pub const base_tag = Tag.inferred_alloc; diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 6cccf77ee0e9..555cda135d79 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -533,8 +533,8 @@ def type_Type_SummaryProvider(value, _=None): 'empty_struct_literal': lambda payload: '@TypeOf(.{})', 'anyerror_void_error_union': lambda payload: 'anyerror!void', - 'const_slice_u8': lambda payload: '[]const u8', - 'const_slice_u8_sentinel_0': lambda payload: '[:0]const u8', + 'slice_const_u8': lambda payload: '[]const u8', + 'slice_const_u8_sentinel_0': lambda payload: '[:0]const u8', 'fn_noreturn_no_args': lambda payload: 'fn() noreturn', 'fn_void_no_args': lambda payload: 'fn() void', 'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.Naked) noreturn', @@ -560,7 +560,7 @@ def type_Type_SummaryProvider(value, _=None): 'many_mut_pointer': lambda payload: '[*]%s' % type_Type_SummaryProvider(payload), 'c_const_pointer': lambda payload: '[*c]const %s' % type_Type_SummaryProvider(payload), 'c_mut_pointer': lambda payload: '[*c]%s' % type_Type_SummaryProvider(payload), - 'const_slice': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload), + 'slice_const': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload), 'mut_slice': lambda payload: '[]%s' % type_Type_SummaryProvider(payload), 'int_signed': lambda payload: 'i%d' % payload.unsigned, 'int_unsigned': lambda payload: 'u%d' % payload.unsigned, diff --git a/tools/stage2_gdb_pretty_printers.py b/tools/stage2_gdb_pretty_printers.py index bd64916536f4..f10e92485523 100644 --- a/tools/stage2_gdb_pretty_printers.py +++ b/tools/stage2_gdb_pretty_printers.py @@ -18,7 +18,7 @@ class TypePrinter: 'many_mut_pointer': 'Type.Payload.ElemType', 'c_const_pointer': 'Type.Payload.ElemType', 'c_mut_pointer': 'Type.Payload.ElemType', - 'const_slice': 'Type.Payload.ElemType', + 'slice_const': 'Type.Payload.ElemType', 'mut_slice': 'Type.Payload.ElemType', 'optional': 'Type.Payload.ElemType', 'optional_single_mut_pointer': 'Type.Payload.ElemType', From 1a4626d2cf8b9985833f97b6fea6ea03011ada4e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 25 May 2023 05:47:25 -0400 Subject: [PATCH 096/205] InternPool: remove more legacy values Reinstate some tags that will be needed for comptime init. --- src/Air.zig | 7 - src/InternPool.zig | 60 +- src/Liveness.zig | 8 +- src/Liveness/Verify.zig | 5 +- src/Module.zig | 266 ++-- src/Sema.zig | 2498 ++++++++++++++++++---------------- src/TypedValue.zig | 24 +- src/arch/aarch64/CodeGen.zig | 2 - src/arch/arm/CodeGen.zig | 2 - src/arch/riscv64/CodeGen.zig | 2 - src/arch/sparc64/CodeGen.zig | 2 - src/arch/wasm/CodeGen.zig | 27 +- src/arch/x86_64/CodeGen.zig | 5 +- src/codegen.zig | 144 -- src/codegen/c.zig | 21 +- src/codegen/llvm.zig | 326 +---- src/codegen/spirv.zig | 73 +- src/print_air.zig | 3 +- src/value.zig | 979 +++++++------ 19 files changed, 2063 insertions(+), 2391 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 9dcbe174ecdc..4f36cf8bc138 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -400,8 +400,6 @@ pub const Inst = struct { /// A comptime-known value. Uses the `ty_pl` field, payload is index of /// `values` array. constant, - /// A comptime-known type. Uses the `ty` field. - const_ty, /// A comptime-known value via an index into the InternPool. /// Uses the `interned` field. 
interned, @@ -1257,8 +1255,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .error_set_has_value, => return Type.bool, - .const_ty => return Type.type, - .alloc, .ret_ptr, .err_return_trace, @@ -1435,7 +1431,6 @@ pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); return switch (air_tags[inst_index]) { - .const_ty => air_datas[inst_index].ty, .interned => air_datas[inst_index].interned.toType(), else => unreachable, }; @@ -1501,7 +1496,6 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], - .const_ty => unreachable, .interned => return air_datas[inst_index].interned.toValue(), else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } @@ -1658,7 +1652,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { .cmp_vector, .cmp_vector_optimized, .constant, - .const_ty, .interned, .is_null, .is_non_null, diff --git a/src/InternPool.zig b/src/InternPool.zig index ec4d1df45fdf..1dc43a467d22 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -553,10 +553,10 @@ pub const Key = union(enum) { pub const Addr = union(enum) { decl: Module.Decl.Index, mut_decl: MutDecl, + comptime_field: Index, int: Index, eu_payload: Index, opt_payload: Index, - comptime_field: Index, elem: BaseIndex, field: BaseIndex, @@ -703,24 +703,27 @@ pub const Key = union(enum) { .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); switch (ip.indexToKey(aggregate.ty)) { - .array_type => |array_type| if (array_type.child == .u8_type) switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), - .elems => |elems| { - var buffer: Key.Int.Storage.BigIntSpace = undefined; - for (elems) |elem| std.hash.autoHash( - hasher, - ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable, - ); - }, - .repeated_elem => |elem| { - const len = ip.aggregateTypeLen(aggregate.ty); - var buffer: Key.Int.Storage.BigIntSpace = undefined; - const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable; - var i: u64 = 0; - while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); - }, + .array_type => |array_type| if (array_type.child == .u8_type) { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, + } + return; }, else => {}, } @@ -2860,6 +2863,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .array_type => |array_type| { assert(array_type.child != .none); + assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child); if (std.math.cast(u32, array_type.len)) |len| { if (array_type.sentinel == .none) { @@ -3230,7 +3234,23 @@ pub fn get(ip: *InternPool, 
gpa: Allocator, key: Key) Allocator.Error!Index { }, .int => |int| b: { - assert(int.ty == .comptime_int_type or ip.indexToKey(int.ty) == .int_type); + switch (int.ty) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .comptime_int_type, + => {}, + else => assert(ip.indexToKey(int.ty) == .int_type), + } switch (int.storage) { .u64, .i64, .big_int => {}, .lazy_align, .lazy_size => |lazy_ty| { diff --git a/src/Liveness.zig b/src/Liveness.zig index 856123fa9d27..c30708e1400f 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -323,7 +323,6 @@ pub fn categorizeOperand( .alloc, .ret_ptr, .constant, - .const_ty, .interned, .trap, .breakpoint, @@ -975,7 +974,6 @@ fn analyzeInst( => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }), .constant, - .const_ty, .interned, => unreachable, @@ -1272,7 +1270,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty, .interned => continue, + .constant, .interned => continue, else => {}, } @@ -1308,7 +1306,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty, .interned => continue, + .constant, .interned => continue, else => {}, } @@ -1842,7 +1840,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // Don't compute any liveness for constants const inst_tags = big.a.air.instructions.items(.tag); switch (inst_tags[operand]) { - .constant, .const_ty, .interned => return, + .constant, .interned => return, else => {}, } diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index 923e6f56589e..703d561559d3 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -43,7 +43,6 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .alloc, .ret_ptr, .constant, - .const_ty, .interned, .breakpoint, .dbg_stmt, @@ -557,7 +556,7 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void { const operand = Air.refToIndexAllowNone(op_ref) orelse return; switch (self.air.instructions.items(.tag)[operand]) { - .constant, .const_ty, .interned => {}, + .constant, .interned => {}, else => { if (dies) { if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand }); @@ -579,7 +578,7 @@ fn verifyInst( } const tag = self.air.instructions.items(.tag); switch (tag[inst]) { - .constant, .const_ty, .interned => unreachable, + .constant, .interned => unreachable, else => { if (self.liveness.isUnused(inst)) { assert(!self.live.contains(inst)); diff --git a/src/Module.zig b/src/Module.zig index fa24c237b43c..47f7643b9fe7 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -85,20 +85,13 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, /// Keys are fully resolved file paths. This table owns the keys and values. embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, -/// This is a temporary addition to stage2 in order to match legacy behavior, -/// however the end-game once the lang spec is settled will be to use a global -/// InternPool for comptime memoized objects, making this behavior consistent across all types, -/// not only string literals. 
Or, we might decide to not guarantee string literals -/// to have equal comptime pointers, in which case this field can be deleted (perhaps -/// the commit that introduced it can simply be reverted). -/// This table uses an optional index so that when a Decl is destroyed, the string literal -/// is still reclaimable by a future Decl. -string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{}, -string_literal_bytes: ArrayListUnmanaged(u8) = .{}, - /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, +/// This is currently only used for string literals, however the end-game once the lang spec +/// is settled will be to make this behavior consistent across all types. +memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, + /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. @@ -208,39 +201,6 @@ pub const CImportError = struct { } }; -pub const StringLiteralContext = struct { - bytes: *ArrayListUnmanaged(u8), - - pub const Key = struct { - index: u32, - len: u32, - }; - - pub fn eql(self: @This(), a: Key, b: Key) bool { - _ = self; - return a.index == b.index and a.len == b.len; - } - - pub fn hash(self: @This(), x: Key) u64 { - const x_slice = self.bytes.items[x.index..][0..x.len]; - return std.hash_map.hashString(x_slice); - } -}; - -pub const StringLiteralAdapter = struct { - bytes: *ArrayListUnmanaged(u8), - - pub fn eql(self: @This(), a_slice: []const u8, b: StringLiteralContext.Key) bool { - const b_slice = self.bytes.items[b.index..][0..b.len]; - return mem.eql(u8, a_slice, b_slice); - } - - pub fn hash(self: @This(), adapted_key: []const u8) u64 { - _ = self; - return std.hash_map.hashString(adapted_key); - } -}; - const MonomorphedFuncsSet = std.HashMapUnmanaged( Fn.Index, void, @@ -660,14 +620,8 @@ pub const Decl = struct { } mod.destroyFunc(func); } + _ = mod.memoized_decls.remove(decl.val.ip_index); if (decl.value_arena) |value_arena| { - if (decl.owns_tv) { - if (decl.val.castTag(.str_lit)) |str_lit| { - mod.string_literal_table.getPtrContext(str_lit.data, .{ - .bytes = &mod.string_literal_bytes, - }).?.* = .none; - } - } value_arena.deinit(gpa); decl.value_arena = null; decl.has_tv = false; @@ -834,7 +788,7 @@ pub const Decl = struct { pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; if (decl.val.ip_index == .none) return .none; - return mod.intern_pool.indexToStructType(decl.val.ip_index); + return mod.intern_pool.indexToStructType(decl.val.toIntern()); } /// If the Decl has a value and it is a union, return it, @@ -875,7 +829,7 @@ pub const Decl = struct { return switch (decl.val.ip_index) { .empty_struct_type => .none, .none => .none, - else => switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(decl.val.toIntern())) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), .struct_type => |struct_type| struct_type.namespace, .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), @@ -919,7 +873,7 @@ pub const Decl = struct { pub fn isExtern(decl: Decl, mod: *Module) bool { assert(decl.has_tv); - return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + return switch 
(mod.intern_pool.indexToKey(decl.val.toIntern())) { .variable => |variable| variable.is_extern, .extern_func => true, else => false, @@ -1577,11 +1531,11 @@ pub const Fn = struct { ip: *InternPool, gpa: Allocator, ) !void { - switch (err_set_ty.ip_index) { + switch (err_set_ty.toIntern()) { .anyerror_type => { self.is_anyerror = true; }, - else => switch (ip.indexToKey(err_set_ty.ip_index)) { + else => switch (ip.indexToKey(err_set_ty.toIntern())) { .error_set_type => |error_set_type| { for (error_set_type.names) |name| { try self.errors.put(gpa, name, {}); @@ -3396,8 +3350,7 @@ pub fn deinit(mod: *Module) void { mod.namespaces_free_list.deinit(gpa); mod.allocated_namespaces.deinit(gpa); - mod.string_literal_table.deinit(gpa); - mod.string_literal_bytes.deinit(gpa); + mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); } @@ -4702,7 +4655,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (mod.intern_pool.indexToFunc(decl_tv.val.ip_index).unwrap()) |func_index| { + if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| { const func = mod.funcPtr(func_index); const owns_tv = func.owner_decl == decl_index; if (owns_tv) { @@ -4749,10 +4702,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.owns_tv = false; var queue_linker_work = false; var is_extern = false; - switch (decl_tv.val.ip_index) { + switch (decl_tv.val.toIntern()) { .generic_poison => unreachable, .unreachable_value => unreachable, - else => switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { .variable => |variable| if (variable.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; @@ -4792,7 +4745,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { .variable => .variable, .extern_func, .func => .function, else => .constant, @@ -6497,40 +6450,33 @@ pub fn populateTestFunctions( const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions // decl reference it as a slice. - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); - - const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); - const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ - .ty = try mod.arrayType(.{ - .len = test_fn_vals.len, - .child = test_fn_ty.ip_index, - .sentinel = .none, - }), - .val = try Value.Tag.aggregate.create(arena, test_fn_vals), - }); - const array_decl = mod.declPtr(array_decl_index); + const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count()); + defer gpa.free(test_fn_vals); // Add a dependency on each test name and function pointer. - try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); + var array_decl_dependencies = std.ArrayListUnmanaged(Decl.Index){}; + defer array_decl_dependencies.deinit(gpa); + try array_decl_dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); - for (mod.test_functions.keys(), 0..) 
|test_decl_index, i| { + for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - const test_name_slice = mem.sliceTo(test_decl.name, 0); const test_name_decl_index = n: { - var name_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer name_decl_arena.deinit(); - const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); - const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type }), - .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), + const test_decl_name = mem.span(test_decl.name); + const test_name_decl_ty = try mod.arrayType(.{ + .len = test_decl_name.len, + .child = .u8_type, + }); + const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = test_name_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = test_name_decl_ty.toIntern(), + .storage = .{ .bytes = test_decl_name }, + } })).toValue(), }); - try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); break :n test_name_decl_index; }; - array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, .normal); - array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal); + array_decl_dependencies.appendAssumeCapacity(test_decl_index); + array_decl_dependencies.appendAssumeCapacity(test_name_decl_index); try mod.linkerUpdateDecl(test_name_decl_index); const test_fn_fields = .{ @@ -6541,36 +6487,51 @@ pub fn populateTestFunctions( } }), // func try mod.intern(.{ .ptr = .{ - .ty = test_decl.ty.ip_index, + .ty = test_decl.ty.toIntern(), .addr = .{ .decl = test_decl_index }, } }), // async_frame_size null_usize, }; - test_fn_vals[i] = (try mod.intern(.{ .aggregate = .{ - .ty = test_fn_ty.ip_index, + test_fn_val.* = try mod.intern(.{ .aggregate = .{ + .ty = test_fn_ty.toIntern(), .storage = .{ .elems = &test_fn_fields }, - } })).toValue(); + } }); + } + + const array_decl_ty = try mod.arrayType(.{ + .len = test_fn_vals.len, + .child = test_fn_ty.toIntern(), + .sentinel = .none, + }); + const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = array_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.toIntern(), + .storage = .{ .elems = test_fn_vals }, + } })).toValue(), + }); + for (array_decl_dependencies.items) |array_decl_dependency| { + try mod.declareDeclDependency(array_decl_index, array_decl_dependency); } - try array_decl.finalizeNewArena(&new_decl_arena); break :d array_decl_index; }; try mod.linkerUpdateDecl(array_decl_index); { const new_ty = try mod.ptrType(.{ - .elem_type = test_fn_ty.ip_index, + .elem_type = test_fn_ty.toIntern(), .is_const = true, .size = .Slice, }); const new_val = decl.val; const new_init = try mod.intern(.{ .ptr = .{ - .ty = new_ty.ip_index, + .ty = new_ty.toIntern(), .addr = .{ .decl = array_decl_index }, - .len = (try mod.intValue(Type.usize, mod.test_functions.count())).ip_index, + .len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(), } }); - mod.intern_pool.mutateVarInit(decl.val.ip_index, new_init); + mod.intern_pool.mutateVarInit(decl.val.toIntern(), new_init); // Since we are replacing the Decl's value we must perform cleanup on the // previous value. 
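// [editor's sketch, not part of the patch] The populateTestFunctions hunk above
// replaces arena-allocated aggregates with interned ones: build an InternPool
// key, mod.intern() it, and wrap the resulting index in a Value only at the
// edges. The same pattern, condensed into a hypothetical helper that interns a
// u8 array the way the test-name decls are built:
fn internU8Array(mod: *Module, bytes: []const u8) !Value {
    const array_ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type });
    return (try mod.intern(.{ .aggregate = .{
        .ty = array_ty.toIntern(),
        .storage = .{ .bytes = bytes },
    } })).toValue();
}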
@@ -6650,47 +6611,32 @@ fn reportRetryableFileError( } pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { - switch (val.ip_index) { - .none => switch (val.tag()) { - .aggregate => { - for (val.castTag(.aggregate).?.data) |field_val| { - mod.markReferencedDeclsAlive(field_val); - } - }, - .@"union" => { - const data = val.castTag(.@"union").?.data; - mod.markReferencedDeclsAlive(data.tag); - mod.markReferencedDeclsAlive(data.val); - }, - else => {}, + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| mod.markDeclIndexAlive(variable.decl), + .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl), + .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .error_union => |error_union| switch (error_union.val) { + .err_name => {}, + .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .variable => |variable| mod.markDeclIndexAlive(variable.decl), - .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl), - .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), - .error_union => |error_union| switch (error_union.val) { - .err_name => {}, - .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), - }, - .ptr => |ptr| { - switch (ptr.addr) { - .decl => |decl| mod.markDeclIndexAlive(decl), - .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), - .int, .comptime_field => {}, - .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), - .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), - } - if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); - }, - .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), - .aggregate => |aggregate| for (aggregate.storage.values()) |elem| - mod.markReferencedDeclsAlive(elem.toValue()), - .un => |un| { - mod.markReferencedDeclsAlive(un.tag.toValue()); - mod.markReferencedDeclsAlive(un.val.toValue()); - }, - else => {}, + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| mod.markDeclIndexAlive(decl), + .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), + .int, .comptime_field => {}, + .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), + .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), + } + if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); + }, + .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| + mod.markReferencedDeclsAlive(elem.toValue()), + .un => |un| { + mod.markReferencedDeclsAlive(un.tag.toValue()); + mod.markReferencedDeclsAlive(un.val.toValue()); }, + else => {}, } } @@ -6796,11 +6742,11 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type } pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.ip_index }); + return ptrType(mod, .{ .elem_type = child_type.toIntern() }); } pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); + return ptrType(mod, .{ .elem_type = child_type.toIntern(), .is_const = true }); } pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { @@ 
-6871,9 +6817,9 @@ pub fn errorSetFromUnsortedNames( pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { const i = try intern(mod, .{ .opt = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .val = try intern(mod, .{ .ptr = .{ - .ty = ty.childType(mod).ip_index, + .ty = ty.childType(mod).toIntern(), .addr = .{ .int = try intern(mod, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = x }, @@ -6890,7 +6836,7 @@ pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { assert(ty.zigTypeTag(mod) == .Pointer); const i = try intern(mod, .{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .int = try intern(mod, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = x }, @@ -6906,7 +6852,7 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er assert(tag == .Enum); } const i = try intern(mod, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = tag_int, } }); return i.toValue(); @@ -6917,12 +6863,12 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value { const ip = &mod.intern_pool; const gpa = mod.gpa; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; if (enum_type.values.len == 0) { // Auto-numbered fields. return (try ip.get(gpa, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = try ip.get(gpa, .{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = field_index }, @@ -6931,7 +6877,7 @@ pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.E } return (try ip.get(gpa, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = enum_type.values[field_index], } })).toValue(); } @@ -6950,7 +6896,7 @@ pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .big_int = x }, } }); return i.toValue(); @@ -6958,7 +6904,7 @@ pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Valu pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .u64 = x }, } }); return i.toValue(); @@ -6966,7 +6912,7 @@ pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .i64 = x }, } }); return i.toValue(); @@ -6974,9 +6920,9 @@ pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value { const i = try intern(mod, .{ .un = .{ - .ty = union_ty.ip_index, - .tag = tag.ip_index, - .val = val.ip_index, + .ty = union_ty.toIntern(), + .tag = tag.toIntern(), + .val = val.toIntern(), } }); return i.toValue(); } @@ -6993,7 +6939,7 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { else => unreachable, }; const i = try intern(mod, .{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = storage, } }); return 
i.toValue(); @@ -7001,9 +6947,9 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { const ip = &mod.intern_pool; - assert(ip.isOptionalType(opt_ty.ip_index)); + assert(ip.isOptionalType(opt_ty.toIntern())); const result = try ip.get(mod.gpa, .{ .opt = .{ - .ty = opt_ty.ip_index, + .ty = opt_ty.toIntern(), .val = .none, } }); return result.toValue(); @@ -7042,7 +6988,7 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { assert(!val.isUndef(mod)); - const key = mod.intern_pool.indexToKey(val.ip_index); + const key = mod.intern_pool.indexToKey(val.toIntern()); switch (key.int.storage) { .i64 => |x| { if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted); @@ -7221,19 +7167,19 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I /// * Not a struct. pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { if (ty.ip_index == .none) return null; - const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null; + const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null; return mod.structPtr(struct_index); } pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { if (ty.ip_index == .none) return null; - const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null; + const union_index = mod.intern_pool.indexToUnionType(ty.toIntern()).unwrap() orelse return null; return mod.unionPtr(union_index); } pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { if (ty.ip_index == .none) return null; - return mod.intern_pool.indexToFuncType(ty.ip_index); + return mod.intern_pool.indexToFuncType(ty.toIntern()); } pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { @@ -7243,7 +7189,7 @@ pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex { if (ty.ip_index == .none) return .none; - return mod.intern_pool.indexToInferredErrorSetType(ty.ip_index); + return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern()); } pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @@ -7268,5 +7214,5 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu } pub fn toEnum(mod: *Module, comptime E: type, val: Value) E { - return mod.intern_pool.toEnum(E, val.ip_index); + return mod.intern_pool.toEnum(E, val.toIntern()); } diff --git a/src/Sema.zig b/src/Sema.zig index d9b346e63829..4478f26bf45e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1866,9 +1866,9 @@ fn resolveConstMaybeUndefVal( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| { - switch (val.ip_index) { + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, }, @@ -1887,10 +1887,10 @@ fn resolveConstValue( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { - switch (val.ip_index) { + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, 
.undef => return sema.failWithUseOfUndef(block, src), - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .undef => return sema.failWithUseOfUndef(block, src), .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, @@ -1930,7 +1930,7 @@ fn resolveMaybeUndefVal( switch (val.ip_index) { .generic_poison => return error.GenericPoison, .none => return val, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .variable => return null, else => return val, }, @@ -1950,7 +1950,7 @@ fn resolveMaybeUndefValIntable( while (true) switch (check.ip_index) { .generic_poison => return error.GenericPoison, .none => break, - else => switch (sema.mod.intern_pool.indexToKey(check.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(check.toIntern())) { .variable => return null, .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => return null, @@ -2007,7 +2007,6 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, - .const_ty => return air_datas[i].ty.toValue(), .interned => return air_datas[i].interned.toValue(), else => return null, } @@ -2490,7 +2489,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE }); try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ - .ty = ptr_ty.ip_index, + .ty = ptr_ty.toIntern(), .addr = .{ .mut_decl = .{ .decl = iac.data.decl_index, .runtime_index = block.runtime_index, @@ -2988,7 +2987,7 @@ fn zirEnumDecl( if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); } - incomplete_enum.setTagType(&mod.intern_pool, ty.ip_index); + incomplete_enum.setTagType(&mod.intern_pool, ty.toIntern()); break :ty ty; } else if (fields_len == 0) { break :ty try mod.intType(.unsigned, 0); @@ -2998,7 +2997,7 @@ fn zirEnumDecl( } }; - if (small.nonexhaustive and int_tag_ty.ip_index != .comptime_int_type) { + if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } @@ -3051,7 +3050,7 @@ fn zirEnumDecl( else => |e| return e, }; last_tag_val = tag_val; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.toIntern())) |other_index| { const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, @@ -3071,7 +3070,7 @@ fn zirEnumDecl( else try mod.intValue(int_tag_ty, 0); last_tag_val = tag_val; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.toIntern())) |other_index| { const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { @@ -3742,7 +3741,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try sema.maybeQueueFuncBodyAnalysis(decl_index); sema.air_values.items[value_index] = (try 
sema.mod.intern(.{ .ptr = .{ - .ty = final_ptr_ty.ip_index, + .ty = final_ptr_ty.toIntern(), .addr = if (var_is_mut) .{ .mut_decl = .{ .decl = decl_index, .runtime_index = block.runtime_index, @@ -3842,7 +3841,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com block.instructions.shrinkRetainingCapacity(search_index); try sema.maybeQueueFuncBodyAnalysis(new_decl_index); sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{ - .ty = final_elem_ty.ip_index, + .ty = final_elem_ty.toIntern(), .addr = .{ .decl = new_decl_index }, } })).toValue(); // if bitcast ty ref needs to be made const, make_ptr_const @@ -4341,12 +4340,12 @@ fn validateUnionInit( block.instructions.shrinkRetainingCapacity(first_block_index); var union_val = try mod.intern(.{ .un = .{ - .ty = union_ty.ip_index, - .tag = tag_val.ip_index, - .val = val.ip_index, + .ty = union_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val.toIntern(), } }); if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{ - .ty = union_ty.ip_index, + .ty = union_ty.toIntern(), .val = union_val, } }); const union_init = try sema.addConstant(union_ty, union_val.toValue()); @@ -4417,7 +4416,7 @@ fn validateStructInit( if (field_ptr != 0) continue; const default_val = struct_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4476,15 +4475,14 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.gpa.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); - defer sema.gpa.free(field_values); + const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); field: for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. 
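Note the allocator change in validateStructInit just above: the scratch field_values buffer now comes from sema.arena instead of sema.gpa, so the matching defer sema.gpa.free(...) disappears; the loop that fills it continues below. A minimal sketch of that allocation pattern, assuming nothing beyond the standard library:

    const std = @import("std");

    // Assumed motivation for the gpa -> sema.arena change: scratch buffers
    // that only live for one analysis pass need no individual frees once
    // they come from an arena.
    test "arena-backed scratch buffers need no matching free" {
        var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena_state.deinit(); // one call releases everything below
        const arena = arena_state.allocator();

        const field_values = try arena.alloc(u32, 4); // no `defer free` needed
        for (field_values, 0..) |*v, i| v.* = @intCast(u32, i);
        try std.testing.expectEqual(@as(u32, 3), field_values[3]);
    }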
const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { - field_values[i] = opv.ip_index; + field_values[i] = opv.toIntern(); continue; } @@ -4549,7 +4547,7 @@ fn validateStructInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - field_values[i] = val.ip_index; + field_values[i] = val.toIntern(); } else if (require_comptime) { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known"); @@ -4563,7 +4561,7 @@ fn validateStructInit( } const default_val = struct_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4583,7 +4581,7 @@ fn validateStructInit( } continue; } - field_values[i] = default_val.ip_index; + field_values[i] = default_val.toIntern(); } if (root_msg) |msg| { @@ -4607,11 +4605,11 @@ fn validateStructInit( block.instructions.shrinkRetainingCapacity(first_block_index); var struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = field_values }, } }); if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .val = struct_val, } }); const struct_init = try sema.addConstant(struct_ty, struct_val.toValue()); @@ -4659,7 +4657,7 @@ fn zirValidateArrayInit( var i = instrs.len; while (i < array_len) : (i += 1) { const default_val = array_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4710,8 +4708,7 @@ fn zirValidateArrayInit( // Collect the comptime element values in case the array literal ends up // being comptime-known. 
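The comptime-known fast path above, and the element collection that continues below, both lean on typeHasOnePossibleValue: when a type admits exactly one value, the slot is known without inspecting any stored instruction. A user-level illustration of such types (ordinary Zig, not Sema internals):

    const std = @import("std");

    // A runtime type with exactly one possible value is zero-bit, so an
    // init of it can be materialized at comptime without reading any
    // field store.
    test "one-possible-value types are zero-bit" {
        const OnlyValue = struct { a: void, b: u0, c: [4]u0 };
        try std.testing.expectEqual(@as(usize, 0), @sizeOf(OnlyValue));
        const x: OnlyValue = .{ .a = {}, .b = 0, .c = .{ 0, 0, 0, 0 } };
        _ = x;
    }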
const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); - const element_vals = try sema.gpa.alloc(InternPool.Index, array_len_s); - defer sema.gpa.free(element_vals); + const element_vals = try sema.arena.alloc(InternPool.Index, array_len_s); const opt_opv = try sema.typeHasOnePossibleValue(array_ty); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); @@ -4721,13 +4718,13 @@ fn zirValidateArrayInit( if (array_ty.isTuple(mod)) { if (try array_ty.structFieldValueComptime(mod, i)) |opv| { - element_vals[i] = opv.ip_index; + element_vals[i] = opv.toIntern(); continue; } } else { // Array has one possible value, so value is always comptime-known if (opt_opv) |opv| { - element_vals[i] = opv.ip_index; + element_vals[i] = opv.toIntern(); continue; } } @@ -4788,7 +4785,7 @@ fn zirValidateArrayInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - element_vals[i] = val.ip_index; + element_vals[i] = val.toIntern(); } else { array_is_comptime = false; } @@ -4800,7 +4797,7 @@ fn zirValidateArrayInit( if (array_is_comptime) { if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| { - switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .comptime_field => return, // This store was validated by the individual elem ptrs. else => {}, @@ -4813,17 +4810,17 @@ fn zirValidateArrayInit( // instead a single `store` to the array_ptr with a comptime struct value. // Also to populate the sentinel value, if any. if (array_ty.sentinel(mod)) |sentinel_val| { - element_vals[instrs.len] = sentinel_val.ip_index; + element_vals[instrs.len] = sentinel_val.toIntern(); } block.instructions.shrinkRetainingCapacity(first_block_index); var array_val = try mod.intern(.{ .aggregate = .{ - .ty = array_ty.ip_index, + .ty = array_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{ - .ty = array_ty.ip_index, + .ty = array_ty.toIntern(), .val = array_val, } }); const array_init = try sema.addConstant(array_ty, array_val.toValue()); @@ -5144,41 +5141,26 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air // expression of a variable declaration. 
const mod = sema.mod; const gpa = sema.gpa; - const string_bytes = &mod.string_literal_bytes; - const StringLiteralAdapter = Module.StringLiteralAdapter; - const StringLiteralContext = Module.StringLiteralContext; - try string_bytes.ensureUnusedCapacity(gpa, zir_bytes.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(gpa, zir_bytes, StringLiteralAdapter{ - .bytes = string_bytes, - }, StringLiteralContext{ - .bytes = string_bytes, + const ty = try mod.arrayType(.{ + .len = zir_bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = zir_bytes }, + } }); + const gop = try mod.memoized_decls.getOrPut(gpa, val); if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, zir_bytes.len), - }; - string_bytes.appendSliceAssumeCapacity(zir_bytes); - gop.value_ptr.* = .none; - } - const decl_index = gop.value_ptr.unwrap() orelse di: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const decl_index = try anon_decl.finish( - try Type.array(anon_decl.arena(), gop.key_ptr.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), - 0, // default alignment - ); - - // Needed so that `Decl.clearValues` will additionally set the corresponding - // string literal table value back to `Decl.OptionalIndex.none`. - mod.declPtr(decl_index).owns_tv = true; + const decl_index = try anon_decl.finish(ty, val.toValue(), 0); - gop.value_ptr.* = decl_index.toOptional(); - break :di decl_index; - }; - return sema.analyzeDeclRef(decl_index); + gop.key_ptr.* = val; + gop.value_ptr.* = decl_index; + } + return sema.analyzeDeclRef(gop.value_ptr.*); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6218,7 +6200,7 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; if (func_val.isUndef(mod)) return null; - const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .extern_func => |extern_func| extern_func.decl, .func => |func| mod.funcPtr(func.index).owner_decl, .ptr => |ptr| switch (ptr.addr) { @@ -6792,7 +6774,7 @@ fn analyzeCall( if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; - const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), @@ -6996,7 +6978,7 @@ fn analyzeCall( } break :blk bare_return_type; }; - new_fn_info.return_type = fn_ret_ty.ip_index; + new_fn_info.return_type = fn_ret_ty.toIntern(); const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; @@ -7289,7 +7271,7 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.ip_index) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet 
unresolvable // parameter or return type. @@ -7328,7 +7310,7 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.ip_index) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. @@ -7426,7 +7408,7 @@ fn instantiateGenericCall( const gpa = sema.gpa; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .func => |function| function.index, .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?, else => unreachable, @@ -7911,7 +7893,7 @@ fn resolveGenericInstantiationType( } new_decl.val = (try mod.intern(.{ .func = .{ - .ty = new_decl.ty.ip_index, + .ty = new_decl.ty.toIntern(), .index = new_func, } })).toValue(); new_decl.@"align" = 0; @@ -7932,7 +7914,7 @@ fn resolveGenericInstantiationType( fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { const mod = sema.mod; - const tuple = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| tuple, else => return, }; @@ -7957,7 +7939,7 @@ fn emitDbgInline( if (old_func == new_func) return; try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ - .ty = new_func_ty.ip_index, + .ty = new_func_ty.toIntern(), .index = new_func, } })).toValue()); _ = try block.addInst(.{ @@ -8019,7 +8001,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.checkVectorElemType(block, elem_type_src, elem_type); const vector_type = try mod.vectorType(.{ .len = len, - .child = elem_type.ip_index, + .child = elem_type.toIntern(), }); return sema.addType(vector_type); } @@ -8129,7 +8111,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const kv = try sema.mod.getErrorValue(name); const error_set_type = try mod.singleErrorSetType(kv.key); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), } })).toValue()); } @@ -8149,7 +8131,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - const err_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; return sema.addConstant(Type.err_int, try mod.intValue( Type.err_int, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value, @@ -8240,7 +8222,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); // Anything merged with anyerror is anyerror. 
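With every type at a canonical InternPool.Index, the anyerror test in the lines that follow becomes a single integer comparison (toIntern() == .anyerror_type) rather than a structural type walk. The language rule it implements is observable from user code:

    const std = @import("std");

    test "merging any error set with anyerror yields anyerror" {
        const Merged = error{ NotDir, PathNotFound } || anyerror;
        // This comptime type equality is what the .anyerror_type
        // index comparison answers inside Sema.
        try std.testing.expect(Merged == anyerror);
    }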
- if (lhs_ty.ip_index == .anyerror_type or rhs_ty.ip_index == .anyerror_type) { + if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) { return Air.Inst.Ref.anyerror_type; } @@ -8445,8 +8427,8 @@ fn analyzeOptionalPayloadPtr( _ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); } return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ - .ty = child_pointer.ip_index, - .addr = .{ .opt_payload = ptr_val.ip_index }, + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { @@ -8455,8 +8437,8 @@ fn analyzeOptionalPayloadPtr( } // The same Value represents the pointer to the optional and the payload. return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ - .ty = child_pointer.ip_index, - .addr = .{ .opt_payload = ptr_val.ip_index }, + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, } })).toValue()); } } @@ -8565,7 +8547,7 @@ fn analyzeErrUnionPayload( } return sema.addConstant( payload_ty, - mod.intern_pool.indexToKey(val.ip_index).error_union.val.payload.toValue(), + mod.intern_pool.indexToKey(val.toIntern()).error_union.val.payload.toValue(), ); } @@ -8633,8 +8615,8 @@ fn analyzeErrUnionPayloadPtr( _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); } return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ - .ty = operand_pointer_ty.ip_index, - .addr = .{ .eu_payload = ptr_val.ip_index }, + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { @@ -8642,8 +8624,8 @@ fn analyzeErrUnionPayloadPtr( return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ - .ty = operand_pointer_ty.ip_index, - .addr = .{ .eu_payload = ptr_val.ip_index }, + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, } })).toValue()); } } @@ -8828,7 +8810,7 @@ fn resolveGenericBody( }; switch (err) { error.GenericPoison => { - if (dest_ty.ip_index == .type_type) { + if (dest_ty.toIntern() == .type_type) { return Value.generic_poison_type; } else { return Value.generic_poison; @@ -9183,7 +9165,7 @@ fn funcCommon( if (is_extern) { return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{ - .ty = fn_ty.ip_index, + .ty = fn_ty.toIntern(), .decl = sema.owner_decl_index, .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString( gpa, @@ -9223,7 +9205,7 @@ fn funcCommon( .is_noinline = is_noinline, }; return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{ - .ty = fn_ty.ip_index, + .ty = fn_ty.toIntern(), .index = new_func_index, } })).toValue()); } @@ -10151,16 +10133,16 @@ fn zirSwitchCapture( .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = union_val.ip_index, + .base = union_val.toIntern(), .index = field_index, } }, } })).toValue()); } return sema.addConstant( field_ty, - mod.intern_pool.indexToKey(union_val.ip_index).un.val.toValue(), + mod.intern_pool.indexToKey(union_val.toIntern()).un.val.toValue(), ); } if (is_ref) { @@ -10256,9 +10238,9 @@ fn zirSwitchCapture( if (try sema.resolveDefinedValue(block, 
operand_src, operand_ptr)) |op_ptr_val| { return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{ - .ty = field_ty_ptr.ip_index, + .ty = field_ty_ptr.toIntern(), .addr = .{ .field = .{ - .base = op_ptr_val.ip_index, + .base = op_ptr_val.toIntern(), .index = first_field_index, } }, } })).toValue()); @@ -11502,7 +11484,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError cases_len += 1; const item_val = try mod.intern(.{ .err = .{ - .ty = operand_ty.ip_index, + .ty = operand_ty.toIntern(), .name = error_name_ip, } }); const item_ref = try sema.addConstant(operand_ty, item_val.toValue()); @@ -11802,7 +11784,7 @@ fn validateSwitchItemError( const ip = &sema.mod.intern_pool; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); // TODO: Do i need to typecheck here? - const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.ip_index).err.name); + const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.toIntern()).err.name); const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev| prev.value else @@ -12035,7 +12017,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ip = &mod.intern_pool; const has_field = hf: { - switch (ip.indexToKey(ty.ip_index)) { + switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => { if (mem.eql(u8, field_name, "ptr")) break :hf true; @@ -12160,17 +12142,23 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1]; - - // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at + // TODO instead of using `.bytes`, create a new value tag for pointing at // a `*Module.EmbedFile`. The purpose of this would be: // - If only the length is read and the bytes are not inspected by comptime code, // there can be an optimization where the codegen backend does a copy_file_range // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. + const ty = try mod.arrayType(.{ + .len = embed_file.bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); embed_file.owner_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), embed_file.bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), + ty, + (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = embed_file.bytes }, + } })).toValue(), 0, // default alignment ); @@ -12186,7 +12174,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R const kv = try mod.getErrorValue(err_name); const error_set_type = try mod.singleErrorSetType(kv.key); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = mod.intern_pool.getString(kv.key).unwrap().?, } })).toValue()); } @@ -12597,15 +12585,15 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod); + elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod)).intern(scalar_type, mod); } - return sema.addConstant( - operand_type, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_type, (try mod.intern(.{ .aggregate = .{ + .ty = operand_type.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod); return sema.addConstant(operand_type, result_val); @@ -12652,22 +12640,22 @@ fn analyzeTupleCat( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i, mod).ip_index; + types[i] = lhs_ty.structFieldType(i, mod).toIntern(); const default_val = lhs_ty.structFieldDefaultValue(i, mod); - values[i] = default_val.ip_index; + values[i] = default_val.toIntern(); const operand_src = lhs_src; // TODO better source location - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { runtime_src = operand_src; values[i] = .none; } } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i, mod).ip_index; + types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern(); const default_val = rhs_ty.structFieldDefaultValue(i, mod); - values[i + lhs_len] = default_val.ip_index; + values[i + lhs_len] = default_val.toIntern(); const operand_src = rhs_src; // TODO better source location - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { runtime_src = operand_src; values[i + lhs_len] = .none; } @@ -12824,34 +12812,32 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else rhs_val; - const final_len_including_sent = result_len + @boolToInt(res_sent_val != null); - const element_vals = try sema.arena.alloc(Value, final_len_including_sent); + const element_vals = try sema.arena.alloc(InternPool.Index, result_len); var elem_i: usize = 0; while (elem_i < lhs_len) : (elem_i += 1) { const lhs_elem_i = elem_i; const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type; const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); - element_vals[elem_i] = coerced_elem_val; + element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod); } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; const elem_ty = if (rhs_is_tuple) 
rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type; const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); - element_vals[elem_i] = coerced_elem_val; - } - if (res_sent_val) |sent_val| { - element_vals[result_len] = sent_val; + element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod); } - const val = try Value.Tag.aggregate.create(sema.arena, element_vals); - return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null); + return sema.addConstantMaybeRef(block, result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue(), ptr_addrspace != null); } else break :rs rhs_src; } else lhs_src; @@ -12978,8 +12964,8 @@ fn analyzeTupleMul( const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (0..tuple_len) |i| { - types[i] = operand_ty.structFieldType(i, mod).ip_index; - values[i] = operand_ty.structFieldDefaultValue(i, mod).ip_index; + types[i] = operand_ty.structFieldType(i, mod).toIntern(); + values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern(); const operand_src = lhs_src; // TODO better source location if (values[i] == .unreachable_value) { runtime_src = operand_src; @@ -13086,8 +13072,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_len == 1) { const elem_val = try lhs_sub_val.elemValue(mod, 0); break :v try mod.intern(.{ .aggregate = .{ - .ty = result_ty.ip_index, - .storage = .{ .repeated_elem = elem_val.ip_index }, + .ty = result_ty.toIntern(), + .storage = .{ .repeated_elem = elem_val.toIntern() }, } }); } @@ -13097,16 +13083,15 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); - assert(elem_val.ip_index != .none); - element_vals[elem_i] = elem_val.ip_index; + element_vals[elem_i] = elem_val.toIntern(); elem_i += 1; } } if (lhs_info.sentinel) |sent_val| { - element_vals[result_len] = sent_val.ip_index; + element_vals[result_len] = sent_val.toIntern(); } break :v try mod.intern(.{ .aggregate = .{ - .ty = result_ty.ip_index, + .ty = result_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); }; @@ -13998,8 +13983,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
else => unreachable, }; const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{ - .ty = resolved_type.ip_index, - .storage = .{ .repeated_elem = scalar_zero.ip_index }, + .ty = resolved_type.toIntern(), + .storage = .{ .repeated_elem = scalar_zero.toIntern() }, } })).toValue() else scalar_zero; return sema.addConstant(resolved_type, zero_val); } @@ -14079,14 +14064,17 @@ fn intRem( ) CompileError!Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intRemScalar(lhs, rhs, ty); } @@ -14517,11 +14505,13 @@ fn zirOverflowArithmetic( } if (result.inst == .none) { - const values = try sema.arena.alloc(Value, 2); - values[0] = result.wrapped; - values[1] = result.overflow_bit; - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + return sema.addConstant(tuple_ty, (try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty.toIntern(), + .storage = .{ .elems = &.{ + result.wrapped.toIntern(), + result.overflow_bit.toIntern(), + } }, + } })).toValue()); } const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2); @@ -14534,8 +14524,8 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) != .Vector) return val; const repeated = try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = val.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = val.toIntern() }, } }); return repeated.toValue(); } @@ -14547,7 +14537,7 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { .child = .u1_type, }) else Type.u1; - const types = [2]InternPool.Index{ ty.ip_index, ov_ty.ip_index }; + const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() }; const values = [2]InternPool.Index{ .none, .none }; const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = &types, @@ -15731,7 +15721,7 @@ fn zirClosureGet( scope = scope.parent.?; }; - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func_index == .none) { + if (tv.val.toIntern() == .unreachable_value and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); @@ -15759,7 +15749,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { + if (tv.val.toIntern() == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); @@ -15789,7 +15779,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value) { + if (tv.val.toIntern() == .unreachable_value) { assert(block.is_typeof); // We need a dummy 
runtime instruction with the correct type. return block.addTy(.alloc, tv.ty); @@ -15840,10 +15830,17 @@ fn zirBuiltinSrc( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const name = mem.span(fn_owner_decl.name); - const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :blk try mod.intern(.{ .ptr = .{ @@ -15857,9 +15854,17 @@ fn zirBuiltinSrc( defer anon_decl.deinit(); // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope(mod).fullPathZ(anon_decl.arena()); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :blk try mod.intern(.{ .ptr = .{ @@ -15877,13 +15882,13 @@ fn zirBuiltinSrc( // line: u32, try mod.intern(.{ .runtime_value = .{ .ty = .u32_type, - .val = (try mod.intValue(Type.u32, extra.line + 1)).ip_index, + .val = (try mod.intValue(Type.u32, extra.line + 1)).toIntern(), } }), // column: u32, - (try mod.intValue(Type.u32, extra.column + 1)).ip_index, + (try mod.intValue(Type.u32, extra.column + 1)).toIntern(), }; return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{ - .ty = src_loc_ty.ip_index, + .ty = src_loc_ty.toIntern(), .storage = .{ .elems = &fields }, } })).toValue()); } @@ -15908,8 +15913,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Null, .EnumLiteral, => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).toIntern(), .val = .void_value, } })).toValue()), .Fn => { @@ -15941,8 +15946,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const param_info_decl = mod.declPtr(param_info_decl_index); const param_info_ty = param_info_decl.val.toType(); - const param_vals = try gpa.alloc(InternPool.Index, info.param_types.len); - defer gpa.free(param_vals); + const param_vals = try sema.arena.alloc(InternPool.Index, info.param_types.len); for (param_vals, info.param_types, 0..) 
|*param_val, param_ty, i| { const is_generic = param_ty == .generic_poison_type; const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{ @@ -15957,40 +15961,40 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const param_fields = .{ // is_generic: bool, - Value.makeBool(is_generic).ip_index, + Value.makeBool(is_generic).toIntern(), // is_noalias: bool, - Value.makeBool(is_noalias).ip_index, + Value.makeBool(is_noalias).toIntern(), // type: ?type, param_ty_val, }; param_val.* = try mod.intern(.{ .aggregate = .{ - .ty = param_info_ty.ip_index, + .ty = param_info_ty.toIntern(), .storage = .{ .elems = ¶m_fields }, } }); } const args_val = v: { const args_slice_ty = try mod.ptrType(.{ - .elem_type = param_info_ty.ip_index, + .elem_type = param_info_ty.toIntern(), .size = .Slice, .is_const = true, }); const new_decl = try params_anon_decl.finish( try mod.arrayType(.{ .len = param_vals.len, - .child = param_info_ty.ip_index, + .child = param_info_ty.toIntern(), .sentinel = .none, }), (try mod.intern(.{ .aggregate = .{ - .ty = args_slice_ty.ip_index, + .ty = args_slice_ty.toIntern(), .storage = .{ .elems = param_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = args_slice_ty.ip_index, + .ty = args_slice_ty.toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, param_vals.len)).ip_index, + .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(), } }); }; @@ -16003,43 +16007,55 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // calling_convention: CallingConvention, - (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).ip_index, + (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).ip_index, + (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(), // is_generic: bool, - Value.makeBool(info.is_generic).ip_index, + Value.makeBool(info.is_generic).toIntern(), // is_var_args: bool, - Value.makeBool(info.is_var_args).ip_index, + Value.makeBool(info.is_var_args).toIntern(), // return_type: ?type, ret_ty_opt, // args: []const Fn.Param, args_val, }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = fn_info_ty.ip_index, + .ty = fn_info_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); }, .Int => { + const int_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Int", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, int_info_decl_index); + try sema.ensureDeclAnalyzed(int_info_decl_index); + const int_info_decl = mod.declPtr(int_info_decl_index); + const int_info_ty = int_info_decl.val.toType(); + const signedness_ty = try sema.getBuiltinType("Signedness"); const info = ty.intInfo(mod); - const field_values = try sema.arena.alloc(Value, 2); - // signedness: Signedness, - field_values[0] = try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness)); - // bits: u16, - field_values[1] = try mod.intValue(Type.u16, info.bits); - - return 
sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // signedness: Signedness, + try (try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness))).intern(signedness_ty, mod), + // bits: u16, + (try mod.intValue(Type.u16, info.bits)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = int_info_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Float => { const float_info_decl_index = (try sema.namespaceLookup( @@ -16051,17 +16067,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); try sema.ensureDeclAnalyzed(float_info_decl_index); const float_info_decl = mod.declPtr(float_info_decl_index); - const float_ty = float_info_decl.val.toType(); + const float_info_ty = float_info_decl.val.toType(); const field_vals = .{ // bits: u16, - (try mod.intValue(Type.u16, ty.bitSize(mod))).ip_index, + (try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = float_ty.ip_index, + .ty = float_info_ty.toIntern(), .storage = .{ .elems = &field_vals }, } }), } })).toValue()); @@ -16099,80 +16115,121 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([8]Value); - field_values.* = .{ + const field_values = .{ // size: Size, - try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size)), + try (try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size))).intern(ptr_size_ty, mod), // is_const: bool, - Value.makeBool(!info.mutable), + Value.makeBool(!info.mutable).toIntern(), // is_volatile: bool, - Value.makeBool(info.@"volatile"), + Value.makeBool(info.@"volatile").toIntern(), // alignment: comptime_int, - alignment, + alignment.toIntern(), // address_space: AddressSpace - try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace")), + try (try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace"))).intern(addrspace_ty, mod), // child: type, - info.pointee_type.toValue(), + info.pointee_type.toIntern(), // is_allowzero: bool, - Value.makeBool(info.@"allowzero"), + Value.makeBool(info.@"allowzero").toIntern(), // sentinel: ?*const anyopaque, - try sema.optRefValue(block, info.pointee_type, info.sentinel), + (try sema.optRefValue(block, info.pointee_type, info.sentinel)).toIntern(), }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try 
mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = pointer_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Array => { - const info = ty.arrayInfo(mod); - const field_values = try sema.arena.alloc(Value, 3); - // len: comptime_int, - field_values[0] = try mod.intValue(Type.comptime_int, info.len); - // child: type, - field_values[1] = info.elem_type.toValue(); - // sentinel: ?*const anyopaque, - field_values[2] = try sema.optRefValue(block, info.elem_type, info.sentinel); + const array_field_ty = t: { + const array_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Array", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, array_field_ty_decl_index); + try sema.ensureDeclAnalyzed(array_field_ty_decl_index); + const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); + break :t array_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + // sentinel: ?*const anyopaque, + (try sema.optRefValue(block, info.elem_type, info.sentinel)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = array_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Vector => { - const info = ty.arrayInfo(mod); - const field_values = try sema.arena.alloc(Value, 2); - // len: comptime_int, - field_values[0] = try mod.intValue(Type.comptime_int, info.len); - // child: type, - field_values[1] = info.elem_type.toValue(); + const vector_field_ty = t: { + const vector_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Vector", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, vector_field_ty_decl_index); + try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); + const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); + break :t vector_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = vector_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + 
} })).toValue()); }, .Optional => { - const field_values = try sema.arena.alloc(Value, 1); - // child: type, - field_values[0] = ty.optionalChild(mod).toValue(); + const optional_field_ty = t: { + const optional_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Optional", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, optional_field_ty_decl_index); + try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); + const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index); + break :t optional_field_ty_decl.val.toType(); + };
- return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // child: type, + ty.optionalChild(mod).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = optional_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .ErrorSet => { var fields_anon_decl = try block.startAnonDecl(); @@ -16202,21 +16259,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Value can be zero-length slice otherwise const error_field_vals = if (ty.isAnyError(mod)) null else blk: { const names = ty.errorSetNames(mod); - const vals = try gpa.alloc(InternPool.Index, names.len); - defer gpa.free(vals); + const vals = try sema.arena.alloc(InternPool.Index, names.len); for (vals, names) |*field_val, name_ip| { const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 ..
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16226,7 +16289,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai name_val, }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = error_field_ty.ip_index, + .ty = error_field_ty.toIntern(), .storage = .{ .elems = &error_field_fields }, } }); } @@ -16236,39 +16299,39 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our ?[]const Error value const slice_errors_ty = try mod.ptrType(.{ - .elem_type = error_field_ty.ip_index, + .elem_type = error_field_ty.toIntern(), .size = .Slice, .is_const = true, }); - const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.ip_index); + const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern()); const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { const array_errors_ty = try mod.arrayType(.{ .len = vals.len, - .child = error_field_ty.ip_index, + .child = error_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_errors_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_errors_ty.ip_index, + .ty = array_errors_ty.toIntern(), .storage = .{ .elems = vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = slice_errors_ty.ip_index, + .ty = slice_errors_ty.toIntern(), .addr = .{ .decl = new_decl }, } }); } else .none; const errors_val = try mod.intern(.{ .opt = .{ - .ty = opt_slice_errors_ty.ip_index, + .ty = opt_slice_errors_ty.toIntern(), .val = errors_payload_val, } }); // Construct Type{ .ErrorSet = errors_val } return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).toIntern(), .val = errors_val, } })).toValue()); }, @@ -16288,22 +16351,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // error_set: type, - ty.errorUnionSet(mod).ip_index, + ty.errorUnionSet(mod).toIntern(), // payload: type, - ty.errorUnionPayload(mod).ip_index, + ty.errorUnionPayload(mod).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = error_union_field_ty.ip_index, + .ty = error_union_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); }, .Enum => { // TODO: look into memoizing this result. 
- const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive); @@ -16323,23 +16386,28 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t enum_field_ty_decl.val.toType(); }; - const enum_field_vals = try gpa.alloc(InternPool.Index, enum_type.names.len); - defer gpa.free(enum_field_vals); - + const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); for (enum_field_vals, 0..) |*field_val, i| { const name_ip = enum_type.names[i]; const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16348,10 +16416,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // value: comptime_int, - (try mod.intValue(Type.comptime_int, i)).ip_index, + (try mod.intValue(Type.comptime_int, i)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = enum_field_ty.ip_index, + .ty = enum_field_ty.toIntern(), .storage = .{ .elems = &enum_field_fields }, } }); } @@ -16359,23 +16427,23 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const fields_array_ty = try mod.arrayType(.{ .len = enum_field_vals.len, - .child = enum_field_ty.ip_index, + .child = enum_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( fields_array_ty, (try mod.intern(.{ .aggregate = .{ - .ty = fields_array_ty.ip_index, + .ty = fields_array_ty.toIntern(), .storage = .{ .elems = enum_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = enum_field_ty.ip_index, + .elem_type = enum_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, } }); }; @@ -16403,13 +16471,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_exhaustive: bool, - is_exhaustive.ip_index, + is_exhaustive.toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_enum_ty.ip_index, + .ty = type_enum_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16460,14 +16528,21 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError!Ai const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16481,12 +16556,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // type: type, - field.ty.ip_index, + field.ty.toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment)).ip_index, + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = union_field_ty.ip_index, + .ty = union_field_ty.toIntern(), .storage = .{ .elems = &union_field_fields }, } }); } @@ -16494,33 +16569,33 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const array_fields_ty = try mod.arrayType(.{ .len = union_field_vals.len, - .child = union_field_ty.ip_index, + .child = union_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_fields_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_fields_ty.ip_index, + .ty = array_fields_ty.toIntern(), .storage = .{ .elems = union_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = union_field_ty.ip_index, + .elem_type = union_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, union_field_vals.len)).ip_index, + .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); const enum_tag_ty_val = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(.type_type)).ip_index, - .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.ip_index else .none, + .ty = (try mod.optionalType(.type_type)).toIntern(), + .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none, } }); const container_layout_ty = t: { @@ -16538,7 +16613,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // layout: ContainerLayout, - (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // tag_type: ?type, enum_tag_ty_val, @@ -16548,10 +16623,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai decls_val, }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).toIntern(), 
.val = try mod.intern(.{ .aggregate = .{ - .ty = type_union_ty.ip_index, + .ty = type_union_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16595,7 +16670,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); fv: { - const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |tuple| { struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); for ( @@ -16611,16 +16686,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // https://github.com/ziglang/zig/issues/15709 @as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i])) else - try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); + try std.fmt.allocPrint(sema.arena, "{d}", .{i}); + const new_decl_ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes.ptr[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = bytes }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(), } }); }; @@ -16633,14 +16716,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // type: type, field_ty, // default_value: ?*const anyopaque, - default_val_ptr.ip_index, + default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(is_comptime).ip_index, + Value.makeBool(is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).ip_index, + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(), }; struct_field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = struct_field_ty.ip_index, + .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); } @@ -16660,20 +16743,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; - const opt_default_val = if (field.default_val.ip_index == .unreachable_value) + const opt_default_val = if (field.default_val.toIntern() == .unreachable_value) null else field.default_val; @@ -16684,16 +16774,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // type: type, - field.ty.ip_index, + field.ty.toIntern(), // default_value: ?*const anyopaque, - default_val_ptr.ip_index, + default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(field.is_comptime).ip_index, + Value.makeBool(field.is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment)).ip_index, + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = struct_field_ty.ip_index, + .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); } @@ -16702,37 +16792,37 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const array_fields_ty = try mod.arrayType(.{ .len = struct_field_vals.len, - .child = struct_field_ty.ip_index, + .child = struct_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_fields_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_fields_ty.ip_index, + .ty = array_fields_ty.toIntern(), .storage = .{ .elems = struct_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = struct_field_ty.ip_index, + .elem_type = struct_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, struct_field_vals.len)).ip_index, + .len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); const backing_integer_val = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(.type_type)).ip_index, + .ty = (try mod.optionalType(.type_type)).toIntern(), .val = if (layout == .Packed) val: { const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); - break :val struct_obj.backing_int_ty.ip_index; + break :val struct_obj.backing_int_ty.toIntern(); } else .none, } }); @@ -16751,7 +16841,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16759,13 +16849,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple(mod)).ip_index, + 
Value.makeBool(struct_ty.isTuple(mod)).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_struct_ty.ip_index, + .ty = type_struct_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16794,10 +16884,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai decls_val, }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_opaque_ty.ip_index, + .ty = type_opaque_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16845,25 +16935,25 @@ fn typeInfoDecls( const array_decl_ty = try mod.arrayType(.{ .len = decl_vals.items.len, - .child = declaration_ty.ip_index, + .child = declaration_ty.toIntern(), .sentinel = .none, }); const new_decl = try decls_anon_decl.finish( array_decl_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_decl_ty.ip_index, + .ty = array_decl_ty.toIntern(), .storage = .{ .elems = decl_vals.items }, } })).toValue(), 0, // default alignment ); return try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = declaration_ty.ip_index, + .elem_type = declaration_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, decl_vals.items.len)).ip_index, + .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(), } }); } @@ -16892,16 +16982,24 @@ fn typeInfoNamespaceDecls( const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); + const name = mem.span(decl.name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; @@ -16909,10 +17007,10 @@ fn typeInfoNamespaceDecls( //name: []const u8, name_val, //is_pub: bool, - Value.makeBool(decl.is_pub).ip_index, + Value.makeBool(decl.is_pub).toIntern(), }; try decl_vals.append(try mod.intern(.{ .aggregate = .{ - .ty = declaration_ty.ip_index, + .ty = declaration_ty.toIntern(), .storage = .{ .elems = &fields }, } })); } @@ -16985,7 +17083,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); return mod.vectorType(.{ .len = operand.vectorLen(mod), - .child = log2_elem_ty.ip_index, + .child = log2_elem_ty.toIntern(), }); }, else => {}, @@ -17527,7 +17625,7 @@ fn zirRetErrValue( const kv = try mod.getErrorValue(err_name); const error_set_type = try mod.singleErrorSetType(err_name); const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), } })).toValue()); return sema.analyzeRet(block, result_inst, src); @@ -17854,9 +17952,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. 
- switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { - .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.ip_index) break :blk .none, + .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none, else => {}, }, else => {}, @@ -17985,7 +18083,7 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com } } return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{ - .ty = obj_ty.ip_index, + .ty = obj_ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue()); } @@ -18021,10 +18119,11 @@ fn unionInit( const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); - return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = init_val, - })); + return sema.addConstant(union_ty, (try mod.intern(.{ .un = .{ + .ty = union_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try init_val.intern(field.ty, mod), + } })).toValue()); } try sema.requireRuntimeBlock(block, init_src, null); @@ -18125,12 +18224,12 @@ fn zirStructInit( const init_inst = try sema.resolveInst(item.data.init); if (try sema.resolveMaybeUndefVal(init_inst)) |val| { - return sema.addConstantMaybeRef( - block, - resolved_ty, - try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }), - is_ref, - ); + const field = resolved_ty.unionFields(mod).values()[field_index]; + return sema.addConstantMaybeRef(block, resolved_ty, (try mod.intern(.{ .un = .{ + .ty = resolved_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try val.intern(field.ty, mod), + } })).toValue(), is_ref); } if (is_ref) { @@ -18171,7 +18270,7 @@ fn finishStructInit( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct| { for (anon_struct.types, anon_struct.values, 0..) |field_ty, default_val, i| { if (field_inits[i] != .none) continue; @@ -18204,7 +18303,7 @@ fn finishStructInit( for (struct_obj.fields.values(), 0..) |field, i| { if (field_inits[i] != .none) continue; - if (field.default_val.ip_index == .unreachable_value) { + if (field.default_val.toIntern() == .unreachable_value) { const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; const args = .{field_name}; @@ -18250,7 +18349,7 @@ fn finishStructInit( .intern(struct_ty.structFieldType(field_i, mod), mod); } const struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = elems }, } }); return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref); @@ -18331,7 +18430,7 @@ fn zirStructInitAnon( gop.value_ptr.* = i; const init = try sema.resolveInst(item.data.init); - field_ty.* = sema.typeOf(init).ip_index; + field_ty.* = sema.typeOf(init).toIntern(); if (field_ty.toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); @@ -18464,15 +18563,19 @@ fn zirArrayInit( } else null; const runtime_index = opt_runtime_index orelse { - const elem_vals = try sema.arena.alloc(Value, resolved_args.len); - - for (resolved_args, 0..) 
|arg, i| { + const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len); + for (elem_vals, resolved_args, 0..) |*val, arg, i| { + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) + array_ty.structFieldType(i, mod) + else + array_ty.elemType2(mod); // We checked that all args are comptime above. - elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?; + val.* = try ((sema.resolveMaybeUndefVal(arg) catch unreachable).?).intern(elem_ty, mod); } - - const array_val = try Value.Tag.aggregate.create(sema.arena, elem_vals); - return sema.addConstantMaybeRef(block, array_ty, array_val, is_ref); + return sema.addConstantMaybeRef(block, array_ty, (try mod.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .elems = elem_vals }, + } })).toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { @@ -18548,7 +18651,7 @@ fn zirArrayInitAnon( for (operands, 0..) |operand, i| { const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); - types[i] = sema.typeOf(elem).ip_index; + types[i] = sema.typeOf(elem).toIntern(); if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); @@ -18560,7 +18663,7 @@ fn zirArrayInitAnon( return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(elem)) |val| { - values[i] = val.ip_index; + values[i] = val.toIntern(); } else { values[i] = .none; runtime_src = operand_src; @@ -18676,7 +18779,7 @@ fn fieldType( const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; switch (cur_ty.zigTypeTag(mod)) { - .Struct => switch (mod.intern_pool.indexToKey(cur_ty.ip_index)) { + .Struct => switch (mod.intern_pool.indexToKey(cur_ty.toIntern())) { .anon_struct_type => |anon_struct| { const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); return sema.addType(anon_struct.types[field_index].toType()); @@ -18698,7 +18801,7 @@ fn fieldType( .Optional => { // Struct/array init through optional requires the child type to not be a pointer. // If the child of .optional is a pointer it'll error on the next loop. - cur_ty = mod.intern_pool.indexToKey(cur_ty.ip_index).opt_type.toType(); + cur_ty = mod.intern_pool.indexToKey(cur_ty.toIntern()).opt_type.toType(); continue; }, .ErrorUnion => { @@ -18776,7 +18879,7 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const err_name = sema.mod.intern_pool.indexToKey(val.ip_index).err.name; + const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name; const bytes = sema.mod.intern_pool.stringToSlice(err_name); return sema.addStrLit(block, bytes); } @@ -18820,21 +18923,21 @@ fn zirUnaryMath( const vec_len = operand_ty.vectorLen(mod); const result_ty = try mod.vectorType(.{ .len = vec_len, - .child = scalar_ty.ip_index, + .child = scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { const elem_val = try val.elemValue(sema.mod, i); - elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod); + elem.* = try (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).intern(scalar_ty, mod); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -18867,7 +18970,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const tag_name = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const tag_name = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; const bytes = mod.intern_pool.stringToSlice(tag_name); return sema.addStrLit(block, bytes); }, @@ -18956,7 +19059,7 @@ fn zirReify( .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const signedness_val = try union_val.val.fieldValue(mod, fields.getIndex("signedness").?); const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); @@ -18966,7 +19069,7 @@ fn zirReify( return sema.addType(ty); }, .Vector => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); @@ -18977,12 +19080,12 @@ fn zirReify( const ty = try mod.vectorType(.{ .len = len, - .child = child_ty.ip_index, + .child = child_ty.toIntern(), }); return sema.addType(ty); }, .Float => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -18997,7 +19100,7 @@ fn zirReify( return sema.addType(ty); }, .Pointer => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const size_val = try union_val.val.fieldValue(mod, fields.getIndex("size").?); const is_const_val = try union_val.val.fieldValue(mod, fields.getIndex("is_const").?); const is_volatile_val = try union_val.val.fieldValue(mod, fields.getIndex("is_volatile").?); @@ -19088,7 +19191,7 @@ fn zirReify( return sema.addType(ty); }, .Array => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); @@ -19107,7 +19210,7 @@ fn zirReify( return sema.addType(ty); }, .Optional => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = 
ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const child_ty = child_val.toType(); @@ -19116,7 +19219,7 @@ fn zirReify( return sema.addType(ty); }, .ErrorUnion => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const error_set_val = try union_val.val.fieldValue(mod, fields.getIndex("error_set").?); const payload_val = try union_val.val.fieldValue(mod, fields.getIndex("payload").?); @@ -19155,7 +19258,7 @@ fn zirReify( return sema.addType(ty); }, .Struct => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); const backing_integer_val = try union_val.val.fieldValue(mod, fields.getIndex("backing_integer").?); const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); @@ -19176,7 +19279,7 @@ fn zirReify( return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool(mod)); }, .Enum => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); @@ -19517,7 +19620,7 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Fn => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const calling_convention_val = try union_val.val.fieldValue(mod, fields.getIndex("calling_convention").?); const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); const is_generic_val = try union_val.val.fieldValue(mod, fields.getIndex("is_generic").?); @@ -19571,7 +19674,7 @@ fn zirReify( const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - param_type.* = param_type_val.ip_index; + param_type.* = param_type_val.toIntern(); if (arg_is_noalias) { if (!param_type.toType().isPtrAtRuntime(mod)) { @@ -19733,7 +19836,7 @@ fn reifyStruct( opt_val; break :blk try payload_val.copy(new_decl_arena_allocator); } else Value.@"unreachable"; - if (is_comptime_val.toBool(mod) and default_val.ip_index == .unreachable_value) { + if (is_comptime_val.toBool(mod) and default_val.toIntern() == .unreachable_value) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } @@ -19956,9 +20059,17 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); + const decl_ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = decl_ty.toIntern(), + .storage = .{ .bytes = bytes }, + } })).toValue(), 0, // default alignment ); @@ -20125,8 +20236,8 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat break :disjoint false; } - if (!ip.isInferredErrorSetType(dest_ty.ip_index) and - !ip.isInferredErrorSetType(operand_ty.ip_index)) + if (!ip.isInferredErrorSetType(dest_ty.toIntern()) and + !ip.isInferredErrorSetType(operand_ty.toIntern())) { break :disjoint true; } @@ -20157,7 +20268,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (maybe_operand_val) |val| { if (!dest_ty.isAnyError(mod)) { - const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.ip_index).err.name); + const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.toIntern()).err.name); if (!dest_ty.errorSetHasField(error_name, mod)) { const msg = msg: { const msg = try sema.errMsg( @@ -20257,11 +20368,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (dest_ty.zigTypeTag(mod) == .Optional) { var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod); dest_ptr_info.@"align" = operand_align; - break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info), mod); + break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, mod, dest_ptr_info), mod); } else { var dest_ptr_info = dest_ty.ptrInfo(mod); dest_ptr_info.@"align" = operand_align; - break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info); + break :blk try Type.ptr(sema.arena, mod, dest_ptr_info); } }; @@ -20279,10 +20390,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air errdefer msg.destroy(sema.gpa); try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{ - operand_ty.fmt(sema.mod), operand_align, + operand_ty.fmt(mod), operand_align, }); try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{ - dest_ty.fmt(sema.mod), dest_align, + dest_ty.fmt(mod), dest_align, }); try sema.errNote(block, src, msg, "consider using '@alignCast'", .{}); @@ -20296,11 +20407,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { - return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); + return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .val = operand_val.toIntern(), } })).toValue()); } @@ -20335,7 +20446,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData var ptr_info = operand_ty.ptrInfo(mod); ptr_info.mutable = true; - const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + const dest_ty = try Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { return sema.addConstant(dest_ty, operand_val); @@ -20356,7 +20467,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD var ptr_info = operand_ty.ptrInfo(mod); ptr_info.@"volatile" = false; - const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + const dest_ty = try 
Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { return sema.addConstant(dest_ty, operand_val); @@ -20382,7 +20493,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const dest_ty = if (is_vector) try mod.vectorType(.{ .len = operand_ty.vectorLen(mod), - .child = dest_scalar_ty.ip_index, + .child = dest_scalar_ty.toIntern(), }) else dest_scalar_ty; @@ -20405,7 +20516,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (operand_info.signedness != dest_info.signedness) { return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{ - @tagName(dest_info.signedness), operand_ty.fmt(sema.mod), + @tagName(dest_info.signedness), operand_ty.fmt(mod), }); } if (operand_info.bits < dest_info.bits) { @@ -20414,7 +20525,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, "destination type '{}' has more bits than source type '{}'", - .{ dest_ty.fmt(sema.mod), operand_ty.fmt(sema.mod) }, + .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{ @@ -20434,18 +20545,18 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!is_vector) { return sema.addConstant( dest_ty, - try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod), + try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod), ); } - const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod)); + const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod)); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod)).intern(dest_scalar_ty, mod); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -20466,7 +20577,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var ptr_info = ptr_ty.ptrInfo(mod); ptr_info.@"align" = dest_align; - var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + var dest_ty = try Type.ptr(sema.arena, mod, ptr_info); if (ptr_ty.zigTypeTag(mod) == .Optional) { dest_ty = try mod.optionalType(dest_ty.toIntern()); } @@ -20531,22 +20642,22 @@ fn zirBitCount( const vec_len = operand_ty.vectorLen(mod); const result_ty = try mod.vectorType(.{ .len = vec_len, - .child = result_scalar_ty.ip_index, + .child = result_scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) 
|*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); + const elem_val = try val.elemValue(mod, i); const count = comptimeOp(elem_val, scalar_ty, mod); - elem.* = try mod.intValue(scalar_ty, count); + elem.* = (try mod.intValue(scalar_ty, count)).toIntern(); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_ty, operand); @@ -20580,7 +20691,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, operand_src, "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits", - .{ scalar_ty.fmt(sema.mod), bits }, + .{ scalar_ty.fmt(mod), bits }, ); } @@ -20605,15 +20716,15 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.byteSwap(operand_ty, mod, sema.arena)).intern(scalar_ty, mod); } - return sema.addConstant( - operand_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{ + .ty = operand_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -20653,15 +20764,15 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod); } - return sema.addConstant( - operand_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{ + .ty = operand_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -20699,7 +20810,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 .Struct => {}, else => { const msg = msg: { - const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, ty); break :msg msg; @@ -20738,7 +20849,7 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .Struct, .Enum, .Union, .Opaque => return, - else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)}), } } @@ -20748,7 +20859,7 @@ fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileEr switch (try ty.zigTypeTagOrPoison(mod)) { .ComptimeInt => return true, .Int => return false, - else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)}), } } @@ -20807,7 +20918,7 @@ fn checkPtrOperand( block, ty_src, "expected pointer, found '{}'", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); @@ -20820,7 +20931,7 @@ fn checkPtrOperand( .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } - return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); } fn checkPtrType( @@ -20838,7 +20949,7 @@ fn checkPtrType( block, ty_src, "expected pointer type, found '{}'", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); @@ -20851,7 +20962,7 @@ fn checkPtrType( .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } - return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); } fn checkVectorElemType( @@ -20865,7 +20976,7 @@ fn checkVectorElemType( .Int, .Float, .Bool => return, else => if (ty.isPtrAtRuntime(mod)) return, } - return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)}); } fn checkFloatType( @@ -20877,7 +20988,7 @@ fn checkFloatType( const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .ComptimeInt, .ComptimeFloat, .Float => {}, - else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, ty_src, "expected float type, found 
'{}'", .{ty.fmt(mod)}), } } @@ -20894,7 +21005,7 @@ fn checkNumericType( .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, - else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}), } } @@ -20928,7 +21039,7 @@ fn checkAtomicPtrOperand( block, elem_ty_src, "expected bool, integer, float, enum, or pointer type; found '{}'", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ), }; @@ -20943,7 +21054,7 @@ fn checkAtomicPtrOperand( const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo(mod), else => { - const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); + const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); unreachable; }, @@ -20953,7 +21064,7 @@ fn checkAtomicPtrOperand( wanted_ptr_data.@"allowzero" = ptr_data.@"allowzero"; wanted_ptr_data.@"volatile" = ptr_data.@"volatile"; - const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); + const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data); const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); return casted_ptr; @@ -21016,12 +21127,12 @@ fn checkIntOrVector( switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ - elem_ty.fmt(sema.mod), + elem_ty.fmt(mod), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } } @@ -21040,12 +21151,12 @@ fn checkIntOrVectorAllowComptime( switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ - elem_ty.fmt(sema.mod), + elem_ty.fmt(mod), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } } @@ -21054,7 +21165,7 @@ fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .ErrorSet => return, - else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(mod)}), } } @@ -21142,7 +21253,7 @@ fn checkVectorizableBinaryOperands( } else { const msg = msg: { const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: '{}' and '{}'", .{ - lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod), + lhs_ty.fmt(mod), rhs_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (lhs_is_vector) { @@ -21161,7 +21272,7 @@ fn checkVectorizableBinaryOperands( fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { if (base_src == .unneeded) return .unneeded; const mod = sema.mod; - return mod.optionsSrc(sema.mod.declPtr(block.src_decl), base_src, wanted); + return mod.optionsSrc(mod.declPtr(block.src_decl), base_src, wanted); } fn resolveExportOptions( @@ -21282,7 +21393,7 @@ fn zirCmpxchg( block, elem_ty_src, "expected bool, integer, enum, or pointer type; found '{}'", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ); } const uncasted_ptr = try 
sema.resolveInst(extra.ptr); @@ -21322,8 +21433,8 @@ fn zirCmpxchg( const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const result_val = try mod.intern(.{ .opt = .{ - .ty = result_ty.ip_index, - .val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { + .ty = result_ty.toIntern(), + .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: { try sema.storePtr(block, src, ptr, new_value); break :blk .none; } else stored_val.toIntern(), @@ -21363,7 +21474,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I try sema.checkVectorElemType(block, scalar_src, scalar_ty); const vector_ty = try mod.vectorType(.{ .len = len, - .child = scalar_ty.ip_index, + .child = scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty); @@ -21489,7 +21600,7 @@ fn analyzeShuffle( const res_ty = try mod.vectorType(.{ .len = mask_len, - .child = elem_ty.ip_index, + .child = elem_ty.toIntern(), }); var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) { @@ -21516,11 +21627,11 @@ fn analyzeShuffle( const a_ty = try mod.vectorType(.{ .len = a_len, - .child = elem_ty.ip_index, + .child = elem_ty.toIntern(), }); const b_ty = try mod.vectorType(.{ .len = b_len, - .child = elem_ty.ip_index, + .child = elem_ty.toIntern(), }); if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src); @@ -21567,25 +21678,22 @@ fn analyzeShuffle( if (try sema.resolveMaybeUndefVal(a)) |a_val| { if (try sema.resolveMaybeUndefVal(b)) |b_val| { - const values = try sema.arena.alloc(Value, mask_len); - + const values = try sema.arena.alloc(InternPool.Index, mask_len); i = 0; while (i < mask_len) : (i += 1) { const mask_elem_val = try mask.elemValue(sema.mod, i); if (mask_elem_val.isUndef(mod)) { - values[i] = Value.undef; + values[i] = try mod.intern(.{ .undef = elem_ty.toIntern() }); continue; } const int = mask_elem_val.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); - if (int >= 0) { - values[i] = try a_val.elemValue(sema.mod, unsigned); - } else { - values[i] = try b_val.elemValue(sema.mod, unsigned); - } + values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod); } - const res_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(res_ty, res_val); + return sema.addConstant(res_ty, (try mod.intern(.{ .aggregate = .{ + .ty = res_ty.toIntern(), + .storage = .{ .elems = values }, + } })).toValue()); } } @@ -21599,22 +21706,25 @@ fn analyzeShuffle( const max_src = if (a_len > b_len) a_src else b_src; const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len)); - const expand_mask_values = try sema.arena.alloc(Value, max_len); + const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); i = 0; while (i < min_len) : (i += 1) { - expand_mask_values[i] = try mod.intValue(Type.comptime_int, i); + expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern(); } while (i < max_len) : (i += 1) { - expand_mask_values[i] = try mod.intValue(Type.comptime_int, -1); + expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern(); } - const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values); + const expand_mask = try mod.intern(.{ .aggregate = .{ + .ty =
(try mod.vectorType(.{ .len = @intCast(u32, max_len), .child = .comptime_int_type })).toIntern(), + .storage = .{ .elems = expand_mask_values }, + } }); if (a_len < b_len) { const undef = try sema.addConstUndef(a_ty); - a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask, @intCast(u32, max_len)); + a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(u32, max_len)); } else { const undef = try sema.addConstUndef(b_ty); - b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask, @intCast(u32, max_len)); + b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(u32, max_len)); } } @@ -21651,7 +21761,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) { .Vector, .Array => pred_ty.arrayLen(mod), - else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}), + else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}), }; const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64)); @@ -21663,7 +21773,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const vec_ty = try mod.vectorType(.{ .len = vec_len, - .child = elem_ty.ip_index, + .child = elem_ty.toIntern(), }); const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src); const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src); @@ -21681,21 +21791,17 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C if (maybe_b) |b_val| { if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); - const elems = try sema.gpa.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..)
|*elem, i| { - const pred_elem_val = try pred_val.elemValue(sema.mod, i); + const pred_elem_val = try pred_val.elemValue(mod, i); const should_choose_a = pred_elem_val.toBool(mod); - if (should_choose_a) { - elem.* = try a_val.elemValue(sema.mod, i); - } else { - elem.* = try b_val.elemValue(sema.mod, i); - } + elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod); } - return sema.addConstant( - vec_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(vec_ty, (try mod.intern(.{ .aggregate = .{ + .ty = vec_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { break :rs b_src; } @@ -22019,7 +22125,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); - if (!args_ty.isTuple(mod) and args_ty.ip_index != .empty_struct_type) { + if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)}); } @@ -22102,7 +22208,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { - const field = switch (mod.intern_pool.indexToKey(field_ptr_val.ip_index)) { + const field = switch (mod.intern_pool.indexToKey(field_ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .field => |field| field, else => null, @@ -22244,16 +22350,16 @@ fn analyzeMinMax( cur_minmax = try sema.addConstant(simd_op.result_ty, result_val); continue; }; - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { const lhs_elem_val = try cur_val.elemValue(mod, i); const rhs_elem_val = try operand_val.elemValue(mod, i); - elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod); + elem.* = try opFunc(lhs_elem_val, rhs_elem_val, mod).intern(simd_op.scalar_ty, mod); } - cur_minmax = try sema.addConstant( - simd_op.result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + cur_minmax = try sema.addConstant(simd_op.result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = simd_op.result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { runtime_known.unset(operand_idx); cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val); @@ -22292,7 +22398,7 @@ fn analyzeMinMax( const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max); break :blk try mod.vectorType(.{ .len = len, - .child = refined_elem_ty.ip_index, + .child = refined_elem_ty.toIntern(), }); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats @@ -22377,7 +22483,7 @@ fn analyzeMinMax( const final_ty = if (is_vector) try mod.vectorType(.{ .len = unrefined_ty.vectorLen(mod), - .child = final_elem_ty.ip_index, + .child = final_elem_ty.toIntern(), }) else final_elem_ty; @@ -22765,7 +22871,7 @@ fn zirVarExtended( try sema.validateVarType(block, ty_src, var_ty, small.is_extern); return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{ - .ty = var_ty.ip_index, + .ty = var_ty.toIntern(), .init = init_val.toIntern(), .decl = sema.owner_decl_index, .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString( @@ -23239,7 +23345,7 @@ fn zirBuiltinExtern( { const new_var = try mod.intern(.{ .variable = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .init = .none, .decl = sema.owner_decl_index, .is_extern = true, @@ -23264,7 +23370,7 @@ fn zirBuiltinExtern( try sema.ensureDeclAnalyzed(new_decl_index); const ref = try mod.intern(.{ .ptr = .{ - .ty = (try mod.singleConstPtrType(ty)).ip_index, + .ty = (try mod.singleConstPtrType(ty)).toIntern(), .addr = .{ .decl = new_decl_index }, } }); return sema.addConstant(ty, ref.toValue()); @@ -24207,7 +24313,7 @@ fn fieldVal( switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { const name = try ip.getOrPutString(gpa, field_name); - switch (ip.indexToKey(child_type.ip_index)) { + switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { if (error_set_type.nameIndex(ip, name) != null) break :blk; const msg = msg: { @@ -24232,7 +24338,7 @@ fn fieldVal( else try mod.singleErrorSetTypeNts(name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = name, } })).toValue()); }, @@ -24376,9 +24482,9 @@ fn fieldPtr( if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ - .ty = result_ty.ip_index, + .ty = result_ty.toIntern(), .addr = .{ .field = .{ - .base = val.ip_index, + .base = val.toIntern(), .index = Value.slice_ptr_index, } }, } })).toValue()); @@ -24396,9 +24502,9 @@ fn fieldPtr( if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ - .ty = result_ty.ip_index, + .ty = result_ty.toIntern(), .addr = .{ .field = .{ - .base = val.ip_index, + .base = val.toIntern(), .index = Value.slice_len_index, } }, } })).toValue()); @@ -24429,7 +24535,7 @@ fn fieldPtr( switch (child_type.zigTypeTag(mod)) { .ErrorSet => { 
const name = try ip.getOrPutString(gpa, field_name); - switch (ip.indexToKey(child_type.ip_index)) { + switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { if (error_set_type.nameIndex(ip, name) != null) { break :blk; @@ -24454,7 +24560,7 @@ fn fieldPtr( return sema.analyzeDeclRef(try anon_decl.finish( error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = name, } })).toValue(), 0, // default alignment @@ -24722,9 +24828,9 @@ fn finishFieldCallBind( if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = struct_ptr_val.ip_index, + .base = struct_ptr_val.toIntern(), .index = field_index, } }, } })).toValue()); @@ -24908,7 +25014,7 @@ fn structFieldPtrByIndex( if (field.is_comptime) { const val = try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) }, } }); return sema.addConstant(ptr_field_ty, val.toValue()); @@ -24916,7 +25022,7 @@ fn structFieldPtrByIndex( if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { const val = try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ .base = try struct_ptr_val.intern(struct_ptr_ty, mod), .index = field_index, @@ -24942,7 +25048,7 @@ fn structFieldVal( assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); @@ -25116,9 +25222,9 @@ fn unionFieldPtr( .Packed, .Extern => {}, } return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = union_ptr_val.ip_index, + .base = union_ptr_val.toIntern(), .index = field_index, } }, } })).toValue()); @@ -25413,16 +25519,16 @@ fn tupleFieldPtr( if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, - .addr = .{ .comptime_field = default_val.ip_index }, + .ty = ptr_field_ty.toIntern(), + .addr = .{ .comptime_field = default_val.toIntern() }, } })).toValue()); } if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| { return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = tuple_ptr_val.ip_index, + .base = tuple_ptr_val.toIntern(), .index = field_index, } }, } })).toValue()); @@ -25787,11 +25893,11 @@ fn coerceExtra( var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (maybe_inst_val) |val| { - if (val.ip_index == .none or val.ip_index == .null_value) { + if (val.ip_index == .none) { // Keep the comptime Value representation; take the new type. 
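                 // (With the legacy, non-interned representation there is no
                 // InternPool key to rewrite, so the `getCoerced` call in the
                 // else branch below cannot be used.)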
return sema.addConstant(dest_ty, val); } else { - const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index); + const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern()); return sema.addConstant(dest_ty, new_val.toValue()); } } @@ -25816,7 +25922,7 @@ fn coerceExtra( // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer if (dest_ty.isPtrLikeOptional(mod) and - dest_ty.elemType2(mod).ip_index == .anyopaque_type and + dest_ty.elemType2(mod).toIntern() == .anyopaque_type and inst_ty.isPtrAtRuntime(mod)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; @@ -25954,7 +26060,7 @@ fn coerceExtra( // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer - if (dest_info.pointee_type.ip_index == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { + if (dest_info.pointee_type.toIntern() == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const elem_ty = inst_ty.elemType2(mod); if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { @@ -26084,12 +26190,12 @@ fn coerceExtra( // Optional slice is represented with a null pointer so // we use a dummy pointer value with the required alignment. return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .addr = .{ .int = (if (dest_info.@"align" != 0) try mod.intValue(Type.usize, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(mod)).ip_index }, - .len = (try mod.intValue(Type.usize, 0)).ip_index, + try dest_info.pointee_type.lazyAbiAlignment(mod)).toIntern() }, + .len = (try mod.intValue(Type.usize, 0)).toIntern(), } })).toValue()); } @@ -26166,7 +26272,7 @@ fn coerceExtra( if (!opts.report_err) return error.NotCoercible; return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }); } - const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index); + const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern()); return try sema.addConstant(dest_ty, new_val.toValue()); } if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { @@ -26258,7 +26364,7 @@ fn coerceExtra( .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const string = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; const bytes = mod.intern_pool.stringToSlice(string); const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { const msg = msg: { @@ -26294,14 +26400,14 @@ fn coerceExtra( .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) { .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { - switch (inst_val.ip_index) { + switch (inst_val.toIntern()) { .undef => return sema.addConstUndef(dest_ty), - else => switch (mod.intern_pool.indexToKey(inst_val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(inst_val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| { const error_set_ty = inst_ty.errorUnionSet(mod); const error_set_val = try sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{ - .ty = error_set_ty.ip_index, + .ty = 
error_set_ty.toIntern(), .name = err_name, } })).toValue()); return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src); @@ -26597,7 +26703,7 @@ const InMemoryCoercionResult = union(enum) { break; }, .array_sentinel => |sentinel| { - if (sentinel.actual.ip_index != .unreachable_value) { + if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{ sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), }); @@ -26724,7 +26830,7 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_sentinel => |sentinel| { - if (sentinel.actual.ip_index != .unreachable_value) { + if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{ sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), }); @@ -27016,9 +27122,9 @@ fn coerceInMemoryAllowedErrorSets( const dst_ies = mod.inferredErrorSetPtr(dst_ies_index); // We will make an effort to return `ok` without resolving either error set, to // avoid unnecessary "unable to resolve error set" dependency loop errors. - switch (src_ty.ip_index) { + switch (src_ty.toIntern()) { .anyerror_type => {}, - else => switch (ip.indexToKey(src_ty.ip_index)) { + else => switch (ip.indexToKey(src_ty.toIntern())) { .inferred_error_set_type => |src_index| { // If both are inferred error sets of functions, and // the dest includes the source function, the coercion is OK. @@ -27054,15 +27160,15 @@ fn coerceInMemoryAllowedErrorSets( var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa); defer missing_error_buf.deinit(); - switch (src_ty.ip_index) { - .anyerror_type => switch (ip.indexToKey(dest_ty.ip_index)) { + switch (src_ty.toIntern()) { + .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) { .inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above. .simple_type => unreachable, // filtered out above .error_set_type => return .from_anyerror, else => unreachable, }, - else => switch (ip.indexToKey(src_ty.ip_index)) { + else => switch (ip.indexToKey(src_ty.toIntern())) { .inferred_error_set_type => |src_index| { const src_data = mod.inferredErrorSetPtr(src_index); @@ -27520,9 +27626,9 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { // allocations is relevant to this function, or why it would have // different behavior depending on whether the types were inferred. // Something seems wrong here. 
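     // (`inferred_alloc_mut_type` and `inferred_alloc_const_type` are
     // placeholder pointer types for allocs whose element type is still being
     // inferred, so there is no meaningful child type to look through yet.)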
- if (prev_ptr_ty.ip_index == .none) { - if (prev_ptr_ty.ip_index == .inferred_alloc_mut_type) return null; - if (prev_ptr_ty.ip_index == .inferred_alloc_const_type) return null; + switch (prev_ptr_ty.ip_index) { + .inferred_alloc_mut_type, .inferred_alloc_const_type => return null, + else => {}, } const prev_ptr_child_ty = prev_ptr_ty.childType(mod); @@ -27554,11 +27660,11 @@ fn storePtrVal( ) !void { const mod = sema.mod; var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty); - try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut); + try sema.checkComptimeVarStore(block, src, mut_kit.mut_decl); switch (mut_kit.pointee) { .direct => |val_ptr| { - if (mut_kit.decl_ref_mut.runtime_index == .comptime_field_ptr) { + if (mut_kit.mut_decl.runtime_index == .comptime_field_ptr) { if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) { // TODO use failWithInvalidComptimeFieldStore return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{}); @@ -27601,7 +27707,7 @@ fn storePtrVal( } const ComptimePtrMutationKit = struct { - decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, + mut_decl: InternPool.Key.Ptr.Addr.MutDecl, pointee: union(enum) { /// The pointer type matches the actual comptime Value so a direct /// modification is possible. @@ -27627,12 +27733,12 @@ const ComptimePtrMutationKit = struct { decl_arena: std.heap.ArenaAllocator = undefined, fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.decl_ref_mut.decl); + const decl = mod.declPtr(self.mut_decl.decl); return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); } fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.decl_ref_mut.decl); + const decl = mod.declPtr(self.mut_decl.decl); decl.value_arena.?.release(&self.decl_arena); self.decl_arena = undefined; } @@ -27645,99 +27751,85 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { - if (true) unreachable; const mod = sema.mod; - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; - const decl = sema.mod.declPtr(decl_ref_mut.decl_index); - return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); + const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; + switch (ptr.addr) { + .decl => unreachable, // isComptimeMutablePtr has been checked already + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl); }, - .comptime_field_ptr => { - const payload = ptr_val.castTag(.comptime_field_ptr).?.data; + .comptime_field => |comptime_field| { const duped = try sema.arena.create(Value); - duped.* = payload.field_val; - return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ - .decl_index = @intToEnum(Module.Decl.Index, 0), + duped.* = comptime_field.toValue(); + return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(ptr_val.toIntern()).toType(), duped, ptr_elem_ty, .{ + .decl = undefined, .runtime_index = .comptime_field_ptr, }); }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); - - switch (parent.pointee) { - .direct => |val_ptr| switch 
(parent.ty.zigTypeTag(mod)) { - .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(mod); - if (elem_ptr.index >= check_len) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ - elem_ptr.index, check_len, - }); - } - const elem_ty = parent.ty.childType(mod); - - // We might have a pointer to multiple elements of the array (e.g. a pointer - // to a sub-array). In this case, we just have to reinterpret the relevant - // bytes of the whole array rather than any single element. - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return .{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - } - - switch (val_ptr.ip_index) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.undef); - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + else => unreachable, + } + if (true) unreachable; + switch (ptr_val.toIntern()) { + .none => switch (ptr_val.tag()) { + .decl_ref_mut => { + const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; + const decl = sema.mod.declPtr(decl_ref_mut.decl_index); + return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); + }, + .comptime_field_ptr => { + const payload = ptr_val.castTag(.comptime_field_ptr).?.data; + const duped = try sema.arena.create(Value); + duped.* = payload.field_val; + return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ + .decl_index = @intToEnum(Module.Decl.Index, 0), + .runtime_index = .comptime_field_ptr, + }); + }, + .elem_ptr => { + const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; + var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); + + switch (parent.pointee) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { + .Array, .Vector => { + const check_len = parent.ty.arrayLenIncludingSentinel(mod); + if (elem_ptr.index >= check_len) { + // TODO have the parent include the decl so we can say "declared here" + return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ + elem_ptr.index, check_len, + }); + } + const elem_ty = parent.ty.childType(mod); + + // We might have a pointer to multiple elements of the array (e.g. a pointer + // to a sub-array). In this case, we just have to reinterpret the relevant + // bytes of the whole array rather than any single element. 
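+                            // (Concretely: a `*[2]u8` into a `[4]u8` has element ABI
+                            // size 1 but pointee ABI size 2, so the branch below
+                            // reinterprets the whole array at a computed byte offset.)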
+ const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); + if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return .{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .reinterpret = .{ + .val_ptr = val_ptr, + .byte_offset = elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, + }; + } - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. + switch (val_ptr.toIntern()) { + .undef => { + // An array has been initialized to undefined at comptime and now we + // are for the first time setting an element. We must change the representation + // of the array from `undef` to `array`. const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try mod.intValue(elem_ty, bytes[i]); - } + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + @memset(elems, Value.undef); val_ptr.* = try Value.Tag.aggregate.create(arena, elems); @@ -27751,392 +27843,383 @@ fn beginComptimePtrMutation( parent.decl_ref_mut, ); }, - .str_lit => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `str_lit` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (bytes, 0..) |byte, i| { - elems[i] = try mod.intValue(elem_ty, byte); - } - if (parent.ty.sentinel(mod)) |sent_val| { - assert(elems.len == bytes.len + 1); - elems[bytes.len] = sent_val; - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( + .none => switch (val_ptr.tag()) { + .bytes => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. 
+ // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `bytes` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const bytes = val_ptr.castTag(.bytes).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + // bytes.len may be one greater than dest_len because of the case when + // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. + assert(bytes.len >= dest_len); + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (elems, 0..) |*elem, i| { + elem.* = try mod.intValue(elem_ty, bytes[i]); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .str_lit => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `str_lit` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const str_lit = val_ptr.castTag(.str_lit).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (bytes, 0..) |byte, i| { + elems[i] = try mod.intValue(elem_ty, byte); + } + if (parent.ty.sentinel(mod)) |sent_val| { + assert(elems.len == bytes.len + 1); + elems[bytes.len] = sent_val; + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .repeated => { + // An array is memory-optimized to store only a single element value, and + // that value is understood to be the same for the entire length of the array. + // However, now we want to modify an individual field and so the + // representation has to change. If we wanted to avoid this, there would + // need to be special detection elsewhere to identify when writing a value to an + // array element that is stored using the `repeated` tag, and handle it + // without making a call to this function. 
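+                            // (Expanding `repeated` into a full `aggregate` lets exactly
+                            // one element be modified while the rest keep the old value.)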
+ const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + if (elems.len > 0) elems[0] = repeated_val; + for (elems[1..]) |*elem| { + elem.* = try repeated_val.copy(arena); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + + .aggregate => return beginComptimePtrMutationInner( sema, block, src, elem_ty, - &elems[elem_ptr.index], + &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], ptr_elem_ty, parent.decl_ref_mut, - ); + ), + + .the_only_possible_value => { + const duped = try sema.arena.create(Value); + duped.* = Value.initTag(.the_only_possible_value); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + duped, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + + else => unreachable, }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + else => unreachable, + } + }, + else => { + if (elem_ptr.index != 0) { + // TODO include a "declared here" note for the decl + return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ + elem_ptr.index, + }); + } + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty, + val_ptr, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + }, + .reinterpret => |reinterpret| { + if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. 
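+                    // (`.bad_ptr_ty` defers to the caller, which reports the
+                    // ill-defined-layout error; nothing is mutated here.)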
+ return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .bad_ptr_ty, + .ty = elem_ptr.elem_ty, + }; + } - const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - if (elems.len > 0) elems[0] = repeated_val; - for (elems[1..]) |*elem| { - elem.* = try repeated_val.copy(arena); - } + const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, + }; + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } + }, + .field_ptr => { + const field_ptr = ptr_val.castTag(.field_ptr).?.data; + const field_index = @intCast(u32, field_ptr.field_index); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); + switch (parent.pointee) { + .direct => |val_ptr| switch (val_ptr.toIntern()) { + .undef => { + // A struct or union has been initialized to undefined at comptime and now we + // are for the first time setting a field. We must change the representation + // of the struct/union from `undef` to `struct`/`union`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + switch (parent.ty.zigTypeTag(mod)) { + .Struct => { + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(fields, Value.undef); + + val_ptr.* = try Value.Tag.aggregate.create(arena, fields); return beginComptimePtrMutationInner( sema, block, src, - elem_ty, - &elems[elem_ptr.index], + parent.ty.structFieldType(field_index, mod), + &fields[field_index], ptr_elem_ty, parent.decl_ref_mut, ); }, + .Union => { + const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); + payload.* = .{ .data = .{ + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), + .val = Value.undef, + } }; - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ), + val_ptr.* = Value.initPayload(&payload.base); - .the_only_possible_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); return beginComptimePtrMutationInner( sema, block, src, - elem_ty, - duped, + parent.ty.structFieldType(field_index, mod), + &payload.data.val, ptr_elem_ty, parent.decl_ref_mut, ); }, - + .Pointer => { + assert(parent.ty.isSlice(mod)); + val_ptr.* = try Value.Tag.slice.create(arena, .{ + .ptr = Value.undef, + .len = Value.undef, + }); + + switch (field_index) { + Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.decl_ref_mut, + ), + Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.decl_ref_mut, + ), + + else => unreachable, + } + }, else => unreachable, - }, - else => 
unreachable, - } - }, - else => { - if (elem_ptr.index != 0) { - // TODO include a "declared here" note for the decl - return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ - elem_ptr.index, - }); - } - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty, - val_ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - }, - .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = elem_ptr.elem_ty, - }; - } + } + }, + .empty_struct => { + const duped = try sema.arena.create(Value); + duped.* = Value.initTag(.the_only_possible_value); + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + duped, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .none => switch (val_ptr.tag()) { + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &val_ptr.castTag(.aggregate).?.data[field_index], + ptr_elem_ty, + parent.decl_ref_mut, + ), + .repeated => { + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); - - var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| switch (val_ptr.ip_index) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - switch (parent.ty.zigTypeTag(mod)) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(fields, Value.undef); - - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); + const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(elems, val_ptr.castTag(.repeated).?.data); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &fields[field_index], + &elems[field_index], ptr_elem_ty, parent.decl_ref_mut, ); }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - const tag_ty = parent.ty.unionTagTypeHypothetical(mod); - payload.* = .{ .data = .{ - .tag = try mod.enumValueFieldIndex(tag_ty, field_index), - .val = Value.undef, - } }; + .@"union" => { + // We need to set the active field of the union. 
+ const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); - val_ptr.* = Value.initPayload(&payload.base); + const payload = &val_ptr.castTag(.@"union").?.data; + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &payload.data.val, + &payload.val, ptr_elem_ty, parent.decl_ref_mut, ); }, - .Pointer => { - assert(parent.ty.isSlice(mod)); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.undef, - .len = Value.undef, - }); + .slice => switch (field_index) { + Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.decl_ref_mut, + ), - switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), + Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.decl_ref_mut, + ), - else => unreachable, - } + else => unreachable, }, - else => unreachable, - } - }, - .empty_struct => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - .repeated => { - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(elems, val_ptr.castTag(.repeated).?.data); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &elems[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .@"union" => { - // We need to set the active field of the union. 
- const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); - - const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &payload.val, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .slice => switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), else => unreachable, }, - else => unreachable, }, - else => unreachable, - }, - .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); - const field_offset = try sema.usizeCast(block, src, field_offset_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + field_offset, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .eu_payload_ptr => { - const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(mod); - if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, - .ty = payload_ty, - }; - } else { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.undef, - }; - - val_ptr.* = Value.initPayload(&payload.base); - + .reinterpret => |reinterpret| { + const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); + const field_offset = try sema.usizeCast(block, src, field_offset_u64); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + field_offset, + } }, + .ty = parent.ty, }; - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. 
- .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = eu_ptr.container_ty, - }, - } - }, - .opt_payload_ptr => { - const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); - }; - var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.optionalChild(mod); - switch (val_ptr.ip_index) { - .undef, .null_value => { - // An optional has been initialized to undefined at comptime and now we + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } + }, + .eu_payload_ptr => { + const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; + var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.errorUnionPayload(mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we // are for the first time setting the payload. We must change the - // representation of the optional from `undef` to `opt_payload`. + // representation of the error union from `undef` to `opt_payload`. const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); const payload = try arena.create(Value.Payload.SubValue); payload.* = .{ - .base = .{ .tag = .opt_payload }, + .base = .{ .tag = .eu_payload }, .data = Value.undef, }; @@ -28147,39 +28230,84 @@ fn beginComptimePtrMutation( .pointee = .{ .direct = &payload.data }, .ty = payload_ty, }; - }, - .none => switch (val_ptr.tag()) { - .opt_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .bad_ptr_ty, + .ty = eu_ptr.container_ty, + }, + } + }, + .opt_payload_ptr => { + const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { + return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); + }; + var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.optionalChild(mod); + switch (val_ptr.toIntern()) { + .undef, .null_value => { + // An optional has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the optional from `undef` to `opt_payload`. 
+ const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .opt_payload }, + .data = Value.undef, + }; + + val_ptr.* = Value.initPayload(&payload.base); + + return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; }, + .none => switch (val_ptr.tag()) { + .opt_payload => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, + else => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, + }, + }, else => return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .pointee = .{ .direct = val_ptr }, .ty = payload_ty, }, - }, - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = opt_ptr.container_ty, - }, - } + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .bad_ptr_ty, + .ty = opt_ptr.container_ty, + }, + } + }, + .decl_ref => unreachable, // isComptimeMutablePtr has been checked already + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr) { + else => unreachable, }, - .decl_ref => unreachable, // isComptimeMutablePtr has been checked already - else => unreachable, } } @@ -28190,13 +28318,13 @@ fn beginComptimePtrMutationInner( decl_ty: Type, decl_val: *Value, ptr_elem_ty: Type, - decl_ref_mut: Value.Payload.DeclRefMut.Data, + mut_decl: InternPool.Key.Ptr.Addr.MutDecl, ) CompileError!ComptimePtrMutationKit { const mod = sema.mod; const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; - const decl = mod.declPtr(decl_ref_mut.decl_index); + const decl = mod.declPtr(mut_decl.decl); var decl_arena: std.heap.ArenaAllocator = undefined; const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); @@ -28204,7 +28332,7 @@ fn beginComptimePtrMutationInner( if (coerce_ok) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .direct = decl_val }, .ty = decl_ty, }; @@ -28215,7 +28343,7 @@ fn beginComptimePtrMutationInner( const decl_elem_ty = decl_ty.childType(mod); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .direct = decl_val }, .ty = decl_ty, }; @@ -28224,20 +28352,20 @@ fn beginComptimePtrMutationInner( if (!decl_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, - .pointee = .{ .bad_decl_ty = {} }, + .mut_decl = mut_decl, + .pointee = .bad_decl_ty, .ty = decl_ty, }; } if (!ptr_elem_ty.hasWellDefinedLayout(mod)) 
{ return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, - .pointee = .{ .bad_ptr_ty = {} }, + .mut_decl = mut_decl, + .pointee = .bad_ptr_ty, .ty = ptr_elem_ty, }; } return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = decl_val, .byte_offset = 0, @@ -28282,7 +28410,7 @@ fn beginComptimePtrLoad( const mod = sema.mod; const target = mod.getTarget(); - var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl => blk: { const decl_index = switch (ptr.addr) { @@ -28319,7 +28447,7 @@ fn beginComptimePtrLoad( (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; if (coerce_in_mem_ok) { - const payload_val = switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + const payload_val = switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}), .payload => |payload| payload, @@ -28462,7 +28590,7 @@ fn beginComptimePtrLoad( }, Value.slice_len_index => TypedValue{ .ty = Type.usize, - .val = mod.intern_pool.indexToKey(tv.val.ip_index).ptr.len.toValue(), + .val = mod.intern_pool.indexToKey(tv.val.toIntern()).ptr.len.toValue(), }, else => unreachable, }; @@ -28565,9 +28693,9 @@ fn coerceArrayPtrToSlice( const ptr_array_ty = sema.typeOf(inst); const array_ty = ptr_array_ty.childType(mod); const slice_val = try mod.intern(.{ .ptr = .{ - .ty = dest_ty.ip_index, - .addr = mod.intern_pool.indexToKey(val.ip_index).ptr.addr, - .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).ip_index, + .ty = dest_ty.toIntern(), + .addr = mod.intern_pool.indexToKey(val.toIntern()).ptr.addr, + .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(), } }); return sema.addConstant(dest_ty, slice_val.toValue()); } @@ -28643,7 +28771,7 @@ fn coerceCompatiblePtrs( return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced( sema.gpa, try val.intern(inst_ty, mod), - dest_ty.ip_index, + dest_ty.toIntern(), )).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -28840,7 +28968,7 @@ fn coerceAnonStructToUnion( return sema.failWithOwnedErrorMsg(msg); } - const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]); const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); @@ -28916,23 +29044,20 @@ fn coerceArrayLike( return block.addBitCast(dest_ty, inst); } - const element_vals = try sema.arena.alloc(Value, dest_len); + const element_vals = try sema.arena.alloc(InternPool.Index, dest_len); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len); var runtime_src: ?LazySrcLoc = null; - for (element_vals, 0..) |*elem, i| { - const index_ref = try sema.addConstant( - Type.usize, - try mod.intValue(Type.usize, i), - ); + for (element_vals, element_refs, 0..) 
|*val, *ref, i| { + const index_ref = try sema.addConstant(Type.usize, try mod.intValue(Type.usize, i)); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); - element_refs[i] = coerced; + ref.* = coerced; if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| { - elem.* = elem_val; + val.* = try elem_val.intern(dest_elem_ty, mod); } else { runtime_src = elem_src; } @@ -28944,10 +29069,10 @@ fn coerceArrayLike( return block.addAggregateInit(dest_ty, element_refs); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, element_vals), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue()); } /// If the lengths match, coerces element-wise. @@ -28978,25 +29103,26 @@ fn coerceTupleToArray( } const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod)); - const element_vals = try sema.arena.alloc(Value, dest_elems); + const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems); const dest_elem_ty = dest_ty.childType(mod); var runtime_src: ?LazySrcLoc = null; - for (element_vals, 0..) |*elem, i_usize| { + for (element_vals, element_refs, 0..) |*val, *ref, i_usize| { const i = @intCast(u32, i_usize); if (i_usize == inst_len) { - elem.* = dest_ty.sentinel(mod).?; - element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*); + const sentinel_val = dest_ty.sentinel(mod).?; + val.* = sentinel_val.toIntern(); + ref.* = try sema.addConstant(dest_elem_ty, sentinel_val); break; } const elem_src = inst_src; // TODO better source location const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); - element_refs[i] = coerced; + ref.* = coerced; if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| { - elem.* = elem_val; + val.* = try elem_val.intern(dest_elem_ty, mod); } else { runtime_src = elem_src; } @@ -29008,10 +29134,10 @@ fn coerceTupleToArray( return block.addAggregateInit(dest_ty, element_refs); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, element_vals), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue()); } /// If the lengths match, coerces element-wise. 
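
(Annotation: the aggregate rewrites in the hunks above and below are instances of
one mechanical pattern: element buffers change from `[]Value` to
`[]InternPool.Index`, and the constant is built with `mod.intern` instead of
`Value.Tag.aggregate.create`. A minimal sketch of the shape, assuming bindings
`mod: *Module`, a destination type `ty`, an element type `elem_ty`, and a
hypothetical element count `n`:

    // legacy form: aggregate payload allocated in Sema's arena
    const old_elems = try sema.arena.alloc(Value, n);
    // ... fill old_elems with Values ...
    return sema.addConstant(ty, try Value.Tag.aggregate.create(sema.arena, old_elems));

    // interned form: element handles plus a single aggregate key
    const new_elems = try sema.arena.alloc(InternPool.Index, n);
    // ... fill new_elems via `val.intern(elem_ty, mod)` or `val.toIntern()` ...
    return sema.addConstant(ty, (try mod.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .elems = new_elems },
    } })).toValue());

In both forms `sema.addConstant` is unchanged; only the construction of the
constant value moves into the InternPool.)
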
@@ -29079,7 +29205,7 @@ fn coerceTupleToStruct( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; var runtime_src: ?LazySrcLoc = null; for (0..anon_struct.types.len) |field_index_usize| { const field_i = @intCast(u32, field_index_usize); @@ -29105,8 +29231,7 @@ fn coerceTupleToStruct( } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - assert(field_val.ip_index != .none); - field_vals[field_index] = field_val.ip_index; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -29123,7 +29248,7 @@ fn coerceTupleToStruct( const field_name = fields.keys()[i]; const field = fields.values()[i]; const field_src = inst_src; // TODO better source location - if (field.default_val.ip_index == .unreachable_value) { + if (field.default_val.toIntern() == .unreachable_value) { const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { @@ -29134,8 +29259,7 @@ fn coerceTupleToStruct( continue; } if (runtime_src == null) { - assert(field.default_val.ip_index != .none); - field_vals[i] = field.default_val.ip_index; + field_vals[i] = field.default_val.toIntern(); } else { field_ref.* = try sema.addConstant(field.ty, field.default_val); } @@ -29152,9 +29276,8 @@ fn coerceTupleToStruct( return block.addAggregateInit(struct_ty, field_refs); } - assert(struct_ty.ip_index != .none); const struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = field_vals }, } }); errdefer mod.intern_pool.remove(struct_val); @@ -29170,13 +29293,13 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.ip_index).anon_struct_type; + const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type; const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const src_tuple = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const src_tuple = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; @@ -29209,7 +29332,7 @@ fn coerceTupleToTuple( } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - field_vals[field_index] = field_val.ip_index; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -29269,7 +29392,7 @@ fn coerceTupleToTuple( return sema.addConstant( tuple_ty, (try mod.intern(.{ .aggregate = .{ - .ty = tuple_ty.ip_index, + .ty = tuple_ty.toIntern(), .storage = .{ .elems = field_vals }, } })).toValue(), ); @@ -29349,7 +29472,7 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { try sema.maybeQueueFuncBodyAnalysis(decl); try mod.declareDeclDependency(sema.owner_decl_index, decl); const result = try mod.intern(.{ .ptr = .{ - .ty = (try mod.singleConstPtrType(ty)).ip_index, + .ty = (try mod.singleConstPtrType(ty)).toIntern(), .addr = .{ .decl = decl }, } }); return result.toValue(); @@ -29360,8 +29483,8 @@ fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { 
const val = opt_val orelse return Value.null; const ptr_val = try sema.refValue(block, ty, val); const result = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).ip_index)).ip_index, - .val = ptr_val.ip_index, + .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).toIntern())).toIntern(), + .val = ptr_val.toIntern(), } }); return result.toValue(); } @@ -29382,7 +29505,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); const ptr_ty = try mod.ptrType(.{ - .elem_type = decl_tv.ty.ip_index, + .elem_type = decl_tv.ty.toIntern(), .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false, .address_space = decl.@"addrspace", @@ -29391,7 +29514,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo try sema.maybeQueueFuncBodyAnalysis(decl_index); } return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.ip_index, + .ty = ptr_ty.toIntern(), .addr = .{ .decl = decl_index }, } })).toValue()); } @@ -29415,13 +29538,10 @@ fn analyzeRef( const operand_ty = sema.typeOf(operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - switch (val.ip_index) { - .none => {}, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { - .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), - .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), - else => {}, - }, + switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), + .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), + else => {}, } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -29617,9 +29737,9 @@ fn analyzeIsNonErrComptimeOnly( // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. 
const set_ty = operand_ty.errorUnionSet(mod); - switch (set_ty.ip_index) { + switch (set_ty.toIntern()) { .anyerror_type => {}, - else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) { .error_set_type => |error_set_type| { if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true; }, @@ -30027,7 +30147,7 @@ fn analyzeSlice( return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced( sema.gpa, try new_ptr_val.intern(new_ptr_ty, mod), - return_ty.ip_index, + return_ty.toIntern(), )).toValue()); } @@ -30546,8 +30666,8 @@ fn wrapOptional( ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(inst)) |val| { return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{ - .ty = dest_ty.ip_index, - .val = val.ip_index, + .ty = dest_ty.toIntern(), + .val = val.toIntern(), } })).toValue()); } @@ -30567,8 +30687,8 @@ fn wrapErrorUnionPayload( const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ - .ty = dest_ty.ip_index, - .val = .{ .payload = val.ip_index }, + .ty = dest_ty.toIntern(), + .val = .{ .payload = val.toIntern() }, } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30588,17 +30708,17 @@ fn wrapErrorUnionSet( const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(mod); if (try sema.resolveMaybeUndefVal(inst)) |val| { - switch (dest_err_set_ty.ip_index) { + switch (dest_err_set_ty.toIntern()) { .anyerror_type => {}, - else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) { + else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) { .error_set_type => |error_set_type| ok: { - const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .inferred_error_set_type => |ies_index| ok: { const ies = mod.inferredErrorSetPtr(ies_index); - const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. @@ -31252,34 +31372,31 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileErro /// Make it so that calling hash() and eql() on `val` will not assert due /// to a type not having its layout resolved. 
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { - switch (val.ip_index) { - .none => {}, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .u64, .i64, .big_int => {}, - .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), - }, - .ptr => |ptr| { - switch (ptr.addr) { - .decl, .mut_decl => {}, - .int => |int| try sema.resolveLazyValue(int.toValue()), - .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), - .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), - .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), - } - if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => {}, - .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), - .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), - }, - .un => |un| { - try sema.resolveLazyValue(un.tag.toValue()); - try sema.resolveLazyValue(un.val.toValue()); - }, - else => {}, + switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), + }, + .ptr => |ptr| { + switch (ptr.addr) { + .decl, .mut_decl => {}, + .int => |int| try sema.resolveLazyValue(int.toValue()), + .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), + .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), + .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), + } + if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => {}, + .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), + .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), }, + .un => |un| { + try sema.resolveLazyValue(un.tag.toValue()); + try sema.resolveLazyValue(un.val.toValue()); + }, + else => {}, } } @@ -31617,9 +31734,9 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => false, .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); @@ -31776,7 +31893,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, - .Struct => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => return sema.resolveStructFully(ty), .anon_struct_type => |tuple| { for (tuple.types) |field_ty| { @@ -31869,7 +31986,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { const mod = sema.mod; - switch (ty.ip_index) { + switch (ty.toIntern()) { .var_args_param_type => unreachable, .none => unreachable, @@ -31960,7 +32077,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .call_modifier_type => return 
sema.getBuiltinType("CallModifier"), .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"), - _ => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + _ => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty; try sema.resolveTypeFieldsStruct(ty, struct_obj); @@ -32605,7 +32722,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } else { // The provided type is the enum tag type. union_obj.tag_ty = provided_ty; - const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { + const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.toIntern())) { .enum_type => |x| x, else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), }; @@ -32698,7 +32815,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk val; }; - enum_field_vals[field_i] = copied_val.ip_index; + enum_field_vals[field_i] = copied_val.toIntern(); const gop = enum_field_vals_map.getOrPutAssumeCapacityContext(copied_val, .{ .ty = int_tag_ty, .mod = mod, @@ -32960,7 +33077,7 @@ fn generateUnionTagTypeSimple( .tag_ty = if (enum_field_names.len == 0) .noreturn_type else - (try mod.smallestUnsignedInt(enum_field_names.len - 1)).ip_index, + (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), .names = enum_field_names, .values = &.{}, .tag_mode = .auto, @@ -33053,9 +33170,9 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => Value.empty_struct, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| { if (int_type.bits == 0) { return try mod.intValue(ty, 0); @@ -33074,13 +33191,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { inline .array_type, .vector_type => |seq_type| { if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue(); if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = opv.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, } })).toValue(); } return null; @@ -33169,7 +33286,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // This TODO is repeated in the redundant implementation of // one-possible-value in type.zig. const empty = try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } }); return empty.toValue(); @@ -33182,7 +33299,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // In this case the struct has all comptime-known fields and // therefore has one possible value. 
return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = tuple.values }, } })).toValue(); }, @@ -33208,9 +33325,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse return null; const only = try mod.intern(.{ .un = .{ - .ty = resolved_ty.ip_index, - .tag = tag_val.ip_index, - .val = val_val.ip_index, + .ty = resolved_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), } }); return only.toValue(); }, @@ -33221,8 +33338,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| { const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.ip_index, - .int = int_opv.ip_index, + .ty = ty.toIntern(), + .int = int_opv.toIntern(), } }); return only.toValue(); } @@ -33234,7 +33351,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { 1 => { if (enum_type.values.len == 0) { const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = try mod.intern(.{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = 0 }, @@ -33285,21 +33402,13 @@ pub fn getTmpAir(sema: Sema) Air { } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { - if (ty.ip_index != .none) { - if (@enumToInt(ty.ip_index) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(ty.ip_index)); - try sema.air_instructions.append(sema.gpa, .{ - .tag = .interned, - .data = .{ .interned = ty.ip_index }, - }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - } else { - try sema.air_instructions.append(sema.gpa, .{ - .tag = .const_ty, - .data = .{ .ty = ty }, - }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - } + if (@enumToInt(ty.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(ty.toIntern())); + try sema.air_instructions.append(sema.gpa, .{ + .tag = .interned, + .data = .{ .interned = ty.toIntern() }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -33313,12 +33422,12 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { const gpa = sema.gpa; - if (val.ip_index != .none and val.ip_index != .null_value) { - if (@enumToInt(val.ip_index) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index)); + if (val.ip_index != .none) { + if (@enumToInt(val.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); try sema.air_instructions.append(gpa, .{ .tag = .interned, - .data = .{ .interned = val.ip_index }, + .data = .{ .interned = val.toIntern() }, }); const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); // This assertion can be removed when the `ty` parameter is removed from @@ -33417,7 +33526,7 @@ fn analyzeComptimeAlloc( try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ - .ty = ptr_type.ip_index, + .ty = ptr_type.toIntern(), .addr = .{ .mut_decl = .{ .decl = decl_index, .runtime_index = block.runtime_index, @@ -33589,7 +33698,7 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError /// This logic must be kept in sync with 
`Type.isPtrLikeOptional`. fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { const mod = sema.mod; - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => null, .C => ptr_type.elem_type.toType(), @@ -33624,10 +33733,10 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// elsewhere in value.zig pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => return false, .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); @@ -33873,7 +33982,7 @@ fn anonStructFieldIndex( field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; - const anon_struct = mod.intern_pool.indexToKey(struct_ty.ip_index).anon_struct_type; + const anon_struct = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type; for (anon_struct.names, 0..) |name, i| { if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { return @intCast(u32, i); @@ -33891,14 +34000,17 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intAddScalar(lhs, rhs, ty); } @@ -33945,14 +34057,17 @@ fn numberAddWrapScalar( fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intSubScalar(lhs, rhs, ty); } @@ -34004,18 +34119,26 @@ fn intSubWithOverflow( ) !Value.OverflowArithmeticResult { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); - for (result_data, 0..) 
|*scalar, i| {
+        const vec_len = ty.vectorLen(mod);
+        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const scalar_ty = ty.scalarType(mod);
+        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
             const lhs_elem = try lhs.elemValue(sema.mod, i);
             const rhs_elem = try rhs.elemValue(sema.mod, i);
-            const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
-            overflowed_data[i] = of_math_result.overflow_bit;
-            scalar.* = of_math_result.wrapped_result;
+            const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
+            of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
         }
         return Value.OverflowArithmeticResult{
-            .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
-            .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
+            .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                .storage = .{ .elems = overflowed_data },
+            } })).toValue(),
+            .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue(),
         };
     }
     return sema.intSubWithOverflowScalar(lhs, rhs, ty);
@@ -34057,13 +34180,17 @@ fn floatToInt(
 ) CompileError!Value {
     const mod = sema.mod;
     if (float_ty.zigTypeTag(mod) == .Vector) {
-        const elem_ty = float_ty.childType(mod);
-        const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod));
+        const elem_ty = float_ty.scalarType(mod);
+        const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod));
+        const scalar_ty = int_ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(sema.mod, i);
-            scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod));
+            scalar.* = try (try sema.floatToIntScalar(block, src, elem_val, elem_ty, scalar_ty)).intern(scalar_ty, mod);
         }
-        return Value.Tag.aggregate.create(sema.arena, result_data);
+        return (try mod.intern(.{ .aggregate = .{
+            .ty = int_ty.toIntern(),
+            .storage = .{ .elems = result_data },
+        } })).toValue();
     }
     return sema.floatToIntScalar(block, src, val, float_ty, int_ty);
 }
@@ -34139,16 +34266,16 @@ fn intFitsInType(
     vector_index: ?*usize,
 ) CompileError!bool {
     const mod = sema.mod;
-    if (ty.ip_index == .comptime_int_type) return true;
+    if (ty.toIntern() == .comptime_int_type) return true;
     const info = ty.intInfo(mod);
-    switch (val.ip_index) {
+    switch (val.toIntern()) {
         .undef,
         .zero,
         .zero_usize,
         .zero_u8,
         => return true,
-        else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .variable, .extern_func, .func, .ptr => {
                 const target = mod.getTarget();
                 const ptr_bits = target.ptrBitWidth();
@@ -34219,12 +34346,12 @@ fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
 /// Asserts the type is an enum.
 fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
     const mod = sema.mod;
-    const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+    const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type;
     assert(enum_type.tag_mode != .nonexhaustive);
     // The `tagValueIndex` function call below relies on the type being the integer tag type.
     // `getCoerced` assumes the value will fit the new type.
     if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false;
-    const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty);
+    const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.toIntern(), enum_type.tag_ty);
     return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null;
 }
@@ -34237,18 +34364,26 @@ fn intAddWithOverflow(
 ) !Value.OverflowArithmeticResult {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
-        for (result_data, 0..) |*scalar, i| {
+        const vec_len = ty.vectorLen(mod);
+        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const scalar_ty = ty.scalarType(mod);
+        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
             const lhs_elem = try lhs.elemValue(sema.mod, i);
             const rhs_elem = try rhs.elemValue(sema.mod, i);
-            const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
-            overflowed_data[i] = of_math_result.overflow_bit;
-            scalar.* = of_math_result.wrapped_result;
+            const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
+            of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
         }
         return Value.OverflowArithmeticResult{
-            .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
-            .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
+            .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                .storage = .{ .elems = overflowed_data },
+            } })).toValue(),
+            .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue(),
         };
     }
     return sema.intAddWithOverflowScalar(lhs, rhs, ty);
@@ -34340,14 +34475,17 @@ fn compareVector(
 ) !Value {
     const mod = sema.mod;
     assert(ty.zigTypeTag(mod) == .Vector);
-    const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+    const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
     for (result_data, 0..) |*scalar, i| {
         const lhs_elem = try lhs.elemValue(sema.mod, i);
         const rhs_elem = try rhs.elemValue(sema.mod, i);
         const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
-        scalar.* = Value.makeBool(res_bool);
+        scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod);
     }
-    return Value.Tag.aggregate.create(sema.arena, result_data);
+    return (try mod.intern(.{ .aggregate = .{
+        .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
+        .storage = .{ .elems = result_data },
+    } })).toValue();
 }

 /// Returns the type of a pointer to an element.
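[ Reviewer note: the Sema hunks above and the backend hunks below apply one
mechanical rewrite over and over: arena-allocated `Value.Tag.aggregate`
payloads become InternPool-interned aggregates, and raw `val.ip_index` reads
become `val.toIntern()`. Below is a minimal sketch of the new construction
pattern, built only from calls that appear in this patch; `makeAggregate`
itself is a hypothetical helper for illustration, not something the patch
adds.

    fn makeAggregate(sema: *Sema, ty: Type, elems: []const InternPool.Index) !Value {
        const mod = sema.mod;
        // Every element is already an InternPool.Index; the aggregate is then
        // interned keyed on its type plus element storage and wrapped as a Value.
        return (try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = elems },
        } })).toValue();
    }

Since `mod.intern` hands back the same index for structurally identical keys,
equal aggregates now share one `InternPool.Index` instead of occupying
separate arena allocations. ]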
diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 2222c1060e42..d82fb72dea01 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -103,10 +103,26 @@ pub fn print( return writer.writeAll(" }"); }, .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); + .repeated => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + var i: u32 = 0; + try writer.writeAll(".{ "); + const elem_tv = TypedValue{ + .ty = ty.elemType2(mod), + .val = val.castTag(.repeated).?.data, + }; + const len = ty.arrayLen(mod); + const max_len = std.math.min(len, max_aggregate_items); + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(elem_tv, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); }, // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index faf158e2a43b..16b103c898e4 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -846,7 +846,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -6169,7 +6168,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 778662fe86f1..f0a44b72a858 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -830,7 +830,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -6117,7 +6116,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index a9cd130fa89a..7f4715a4510c 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -660,7 +660,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -2571,7 +2570,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index dc086dc00f02..9f44dc0e8a82 100644 --- 
a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -680,7 +680,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -4567,7 +4566,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst), } } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 66c0399343cc..85fc8346f837 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1833,7 +1833,6 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const air_tags = func.air.instructions.items(.tag); return switch (air_tags[inst]) { .constant => unreachable, - .const_ty => unreachable, .interned => unreachable, .add => func.airBinOp(inst, .add), @@ -6903,28 +6902,12 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { .child = .u8_type, .sentinel = .zero_u8, }); - const string_bytes = &mod.string_literal_bytes; - try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, @as([]const u8, tag_name), Module.StringLiteralAdapter{ - .bytes = string_bytes, - }, Module.StringLiteralContext{ - .bytes = string_bytes, - }); - if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, tag_name.len), - }; - string_bytes.appendSliceAssumeCapacity(tag_name); - gop.value_ptr.* = .none; - } - var name_val_payload: Value.Payload.StrLit = .{ - .base = .{ .tag = .str_lit }, - .data = gop.key_ptr.*, - }; - const name_val = Value.initPayload(&name_val_payload.base); + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = tag_name }, + } }); const tag_sym_index = try func.bin_file.lowerUnnamedConst( - .{ .ty = name_ty, .val = name_val }, + .{ .ty = name_ty, .val = name_val.toValue() }, enum_decl_index, ); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4a5532a23954..f2ac98584415 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1923,7 +1923,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), @@ -2099,7 +2098,7 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void { /// Asserts there is already capacity to insert into top branch inst_table. 
fn processDeath(self: *Self, inst: Air.Inst.Index) void { switch (self.air.instructions.items(.tag)[inst]) { - .constant, .const_ty => unreachable, + .constant => unreachable, else => self.inst_tracking.getPtr(inst).?.die(self, inst), } } @@ -11593,7 +11592,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { })); break :tracking gop.value_ptr; }, - .const_ty => unreachable, else => self.inst_tracking.getPtr(inst).?, }.short; switch (mcv) { @@ -11608,7 +11606,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { const tracking = switch (self.air.instructions.items(.tag)[inst]) { .constant => &self.const_tracking, - .const_ty => unreachable, else => &self.inst_tracking, }.getPtr(inst).?; return switch (tracking.short) { diff --git a/src/codegen.zig b/src/codegen.zig index b9b7dac90fc8..f343f0441d42 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -204,150 +204,6 @@ pub fn generateSymbol( return .ok; } - if (typed_value.val.ip_index == .none) switch (typed_value.ty.zigTypeTag(mod)) { - .Array => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); - // The bytes payload already includes the sentinel, if any - try code.ensureUnusedCapacity(len); - code.appendSliceAssumeCapacity(bytes[0..len]); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try code.ensureUnusedCapacity(bytes.len + 1); - code.appendSliceAssumeCapacity(bytes); - if (typed_value.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - code.appendAssumeCapacity(byte); - } - return Result.ok; - }, - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for array type value: {s}", - .{@tagName(typed_value.val.tag())}, - ), - }, - }, - .Struct => { - if (typed_value.ty.containerLayout(mod) == .Packed) { - const struct_obj = mod.typeToStruct(typed_value.ty).?; - const fields = struct_obj.fields.values(); - const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - const current_pos = code.items.len; - try code.resize(current_pos + abi_size); - var bits: u16 = 0; - - for (field_vals, 0..) |field_val, index| { - const field_ty = fields[index].ty; - // pointer may point to a decl which must be marked used - // but can also result in a relocation. Therefore we handle those seperately. 
- if (field_ty.zigTypeTag(mod) == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; - var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); - defer tmp_list.deinit(); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, &tmp_list, debug_output, reloc_info)) { - .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), - .fail => |em| return Result{ .fail = em }, - } - } else { - field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; - } - bits += @intCast(u16, field_ty.bitSize(mod)); - } - - return Result.ok; - } - - const struct_begin = code.items.len; - const field_vals = typed_value.val.castTag(.aggregate).?.data; - for (field_vals, 0..) |field_val, index| { - const field_ty = typed_value.ty.structFieldType(index, mod); - if (!field_ty.hasRuntimeBits(mod)) continue; - - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - const unpadded_field_end = code.items.len - struct_begin; - - // Pad struct members if required - const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); - const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; - - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - return Result.ok; - }, - .Vector => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(len + padding); - code.appendSliceAssumeCapacity(bytes[0..len]); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(str_lit.len + padding); - code.appendSliceAssumeCapacity(bytes); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - else => unreachable, - }, - .Frame, - .AnyFrame, - => return .{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO generateSymbol for type {}", - .{typed_value.ty.fmt(mod)}, - ) }, - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { .int_type, .ptr_type, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1bb8130b1fc2..76533b4284a1 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -870,7 +870,7 @@ pub const DeclGen = struct { } // First try specific tag representations for more efficiency. 
- switch (val.ip_index) { + switch (val.toIntern()) { .undef => { const ai = ty.arrayInfo(mod); try writer.writeByte('{'); @@ -893,24 +893,6 @@ pub const DeclGen = struct { try writer.writeByte('}'); return; }, - .none => switch (val.tag()) { - .bytes, .str_lit => |t| { - const bytes = switch (t) { - .bytes => val.castTag(.bytes).?.data, - .str_lit => bytes: { - const str_lit = val.castTag(.str_lit).?.data; - break :bytes mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - }, - else => unreachable, - }; - const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; - try writer.print("{s}", .{ - fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), - }); - return; - }, - else => {}, - }, else => {}, } // Fall back to generic implementation. @@ -2909,7 +2891,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const result_value = switch (air_tags[inst]) { // zig fmt: off .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .arg => try airArg(f, inst), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index f8ddddad1c9f..e54b951aa604 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1501,7 +1501,7 @@ pub const Object = struct { } const ip = &mod.intern_pool; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len); defer gpa.free(enumerators); @@ -1697,7 +1697,7 @@ pub const Object = struct { return ptr_di_ty; }, .Opaque => { - if (ty.ip_index == .anyopaque_type) { + if (ty.toIntern() == .anyopaque_type) { const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; @@ -1981,7 +1981,7 @@ pub const Object = struct { break :blk fwd_decl; }; - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; defer di_fields.deinit(gpa); @@ -2466,7 +2466,7 @@ pub const DeclGen = struct { global.setGlobalConstant(.True); break :init_val decl.val; }; - if (init_val.ip_index != .unreachable_value) { + if (init_val.toIntern() != .unreachable_value) { const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val }); if (global.globalGetValueType() == llvm_init.typeOf()) { global.setInitializer(llvm_init); @@ -2802,12 +2802,12 @@ pub const DeclGen = struct { return dg.context.pointerType(llvm_addrspace); }, .Opaque => { - if (t.ip_index == .anyopaque_type) return dg.context.intType(8); + if (t.toIntern() == .anyopaque_type) return dg.context.intType(8); const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - const opaque_type = mod.intern_pool.indexToKey(t.ip_index).opaque_type; + const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type; const name = try mod.opaqueFullyQualifiedName(opaque_type); defer gpa.free(name); @@ -2897,7 +2897,7 @@ pub const DeclGen = struct { const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) { 
.anon_struct_type => |tuple| { const llvm_struct_ty = dg.context.structCreateNamed(""); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls @@ -3199,7 +3199,7 @@ pub const DeclGen = struct { const mod = dg.module; const target = mod.getTarget(); var tv = arg_tv; - switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .runtime_value => |rt| tv.val = rt.val.toValue(), else => {}, } @@ -3208,284 +3208,7 @@ pub const DeclGen = struct { return llvm_type.getUndef(); } - if (tv.val.ip_index == .none) switch (tv.ty.zigTypeTag(mod)) { - .Array => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ); - }, - .str_lit => { - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - if (byte == 0 and bytes.len > 0) { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .False, // Yes, null terminate. - ); - } - var array = std.ArrayList(u8).init(dg.gpa); - defer array.deinit(); - try array.ensureUnusedCapacity(bytes.len + 1); - array.appendSliceAssumeCapacity(bytes); - array.appendAssumeCapacity(byte); - return dg.context.constString( - array.items.ptr, - @intCast(c_uint, array.items.len), - .True, // Don't null terminate. - ); - } else { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .True, // Don't null terminate. `bytes` has the sentinel, if any. - ); - } - }, - else => unreachable, - }, - .Struct => { - const llvm_struct_ty = try dg.lowerType(tv.ty); - const gpa = dg.gpa; - - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { - .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); - - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; - - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { - if (field_val != .none) continue; - if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. 
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(mod, i), - }); - - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); - - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } - - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; - - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); - const fields = struct_obj.fields.values(); - comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); - var running_bits: u16 = 0; - for (fields, 0..) |field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - const non_int_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, i), - }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); - const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); - running_bits += ty_bit_size; - } - return running_int; - } - - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; - - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(mod, struct_obj.layout); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. 
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - - const field_llvm_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, field_and_index.index), - }); - - need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); - - offset += field.ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } - - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .Vector => switch (tv.val.tag()) { - .bytes => { - // Note, sentinel is not stored even if the type has a sentinel. - const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .str_lit => { - // Note, sentinel is not stored - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) 
|*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - else => unreachable, - }, - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Frame, - .AnyFrame, - => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - - switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -3553,7 +3276,7 @@ pub const DeclGen = struct { const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, .val = switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_type.ip_index }), + .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), .payload => |payload| payload, }.toValue(), }); @@ -3700,7 +3423,7 @@ pub const DeclGen = struct { fields_buf[0] = try dg.lowerValue(.{ .ty = payload_ty, .val = switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), else => |payload| payload, }.toValue(), }); @@ -3711,7 +3434,7 @@ pub const DeclGen = struct { } return dg.context.constStruct(&fields_buf, llvm_field_count, .False); }, - .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { .array_type => switch (aggregate.storage) { .bytes => |bytes| return dg.context.constString( bytes.ptr, @@ -3802,7 +3525,7 @@ pub const DeclGen = struct { const llvm_struct_ty = try dg.lowerType(tv.ty); const gpa = dg.gpa; - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { .anon_struct_type => |tuple| { var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; defer llvm_fields.deinit(gpa); @@ -3967,9 +3690,9 @@ pub const DeclGen = struct { }, .un => { const llvm_union_ty = try dg.lowerType(tv.ty); - const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) { + const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) { .none => tv.val.castTag(.@"union").?.data, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() }, else => unreachable, }, @@ -4107,7 +3830,7 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); - return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + return switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .int => |int| dg.lowerIntAsPtr(int), .ptr => |ptr| switch (ptr.addr) { .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), @@ -4799,7 +4522,6 @@ pub const FuncGen = struct { .vector_store_elem => try self.airVectorStoreElem(inst), .constant => unreachable, - .const_ty => unreachable, .interned => unreachable, .unreach => self.airUnreach(inst), @@ -6108,7 +5830,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = 
self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = llvm_field.ty.ip_index, + .elem_type = llvm_field.ty.toIntern(), .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); if (isByRef(field_ty, mod)) { @@ -6984,7 +6706,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = llvm_field.ty.ip_index, + .elem_type = llvm_field.ty.toIntern(), .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); return self.load(field_ptr, field_ptr_ty); @@ -8915,7 +8637,7 @@ pub const FuncGen = struct { fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { const mod = self.dg.module; - const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function. const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_type.decl); @@ -8988,7 +8710,7 @@ pub const FuncGen = struct { fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { const mod = self.dg.module; - const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function. const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_type.decl); @@ -10529,7 +10251,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { var offset: u64 = 0; var big_align: u32 = 0; - const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var llvm_field_index: c_uint = 0; for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { @@ -10927,7 +10649,7 @@ const ParamTypeIterator = struct { .riscv32, .riscv64 => { it.zig_index += 1; it.llvm_index += 1; - if (ty.ip_index == .f16_type) { + if (ty.toIntern() == .f16_type) { return .as_u16; } switch (riscv_c_abi.classifyType(ty, mod)) { @@ -11146,7 +10868,7 @@ fn isByRef(ty: Type, mod: *Module) bool { .Struct => { // Packed structs are represented to LLVM as integers. 
if (ty.containerLayout(mod) == .Packed) return false; - const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var count: usize = 0; for (tuple.types, tuple.values) |field_ty, field_val| { @@ -11261,7 +10983,7 @@ fn backendSupportsF128(target: std.Target) bool { /// LLVM does not support all relevant intrinsics for all targets, so we /// may need to manually generate a libc call fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool { - return switch (scalar_ty.ip_index) { + return switch (scalar_ty.toIntern()) { .f16_type => backendSupportsF16(target), .f80_type => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target), .f128_type => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 96c723989ae6..2b04e03a5a6f 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -616,7 +616,7 @@ pub const DeclGen = struct { const mod = dg.module; var val = arg_val; - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .runtime_value => |rt| val = rt.val.toValue(), else => {}, } @@ -626,75 +626,7 @@ pub const DeclGen = struct { return try self.addUndef(size); } - if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { - .Array => switch (val.tag()) { - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try self.addBytes(bytes); - if (ty.sentinel(mod)) |sentinel| { - try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); - } - }, - .bytes => { - const bytes = val.castTag(.bytes).?.data; - try self.addBytes(bytes); - }, - else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}), - }, - .Struct => { - if (ty.isSimpleTupleOrAnonStruct(mod)) { - unreachable; // TODO - } else { - const struct_ty = mod.typeToStruct(ty).?; - - if (struct_ty.layout == .Packed) { - return dg.todo("packed struct constants", .{}); - } - - const struct_begin = self.size; - const field_vals = val.castTag(.aggregate).?.data; - for (struct_ty.fields.values(), 0..) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; - try self.lower(field.ty, field_vals[i]); - - // Add padding if required. - // TODO: Add to type generation as well? 
- const unpadded_field_end = self.size - struct_begin; - const padded_field_end = ty.structFieldOffset(i + 1, mod); - const padding = padded_field_end - unpadded_field_end; - try self.addUndef(padding); - } - } - }, - .Vector, - .Frame, - .AnyFrame, - => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -1876,7 +1808,6 @@ pub const DeclGen = struct { .breakpoint => return, .cond_br => return self.airCondBr(inst), .constant => unreachable, - .const_ty => unreachable, .dbg_stmt => return self.airDbgStmt(inst), .loop => return self.airLoop(inst), .ret => return self.airRet(inst), diff --git a/src/print_air.zig b/src/print_air.zig index 9169a88bbcc0..58e4029543cc 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -95,7 +95,7 @@ const Writer = struct { for (w.air.instructions.items(.tag), 0..) |tag, i| { const inst = @intCast(Air.Inst.Index, i); switch (tag) { - .constant, .const_ty, .interned => { + .constant, .interned => { try w.writeInst(s, inst); try s.writeByte('\n'); }, @@ -226,7 +226,6 @@ const Writer = struct { .save_err_return_trace_index, => try w.writeNoOp(s, inst), - .const_ty, .alloc, .ret_ptr, .err_return_trace, diff --git a/src/value.zig b/src/value.zig index 47215e588cbc..ef3a3f6be18d 100644 --- a/src/value.zig +++ b/src/value.zig @@ -37,8 +37,9 @@ pub const Value = struct { /// A slice of u8 whose memory is managed externally. bytes, - /// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`. - str_lit, + /// This value is repeated some number of times. The amount of times to repeat + /// is stored externally. + repeated, /// An instance of a struct, array, or vector. /// Each element/field stored as a `Value`. 
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -57,9 +58,9 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .bytes => Payload.Bytes, + .repeated => Payload.SubValue, - .str_lit => Payload.StrLit, + .bytes => Payload.Bytes, .inferred_alloc => Payload.InferredAlloc, .inferred_alloc_comptime => Payload.InferredAllocComptime, @@ -171,7 +172,18 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit), + .repeated => { + const payload = self.cast(Payload.SubValue).?; + const new_payload = try arena.create(Payload.SubValue); + new_payload.* = .{ + .base = payload.base, + .data = try payload.data.copy(arena), + }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; + }, .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -187,7 +199,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .@"union" => { const tag_and_val = self.castTag(.@"union").?.data; const new_payload = try arena.create(Payload.Union); @@ -203,7 +214,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, } @@ -237,7 +247,7 @@ pub const Value = struct { ) !void { comptime assert(fmt.len == 0); if (start_val.ip_index != .none) { - try out_stream.print("(interned: {})", .{start_val.ip_index}); + try out_stream.print("(interned: {})", .{start_val.toIntern()}); return; } var val = start_val; @@ -249,11 +259,9 @@ pub const Value = struct { return out_stream.writeAll("(union value)"); }, .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - return out_stream.print("(.str_lit index={d} len={d})", .{ - str_lit.index, str_lit.len, - }); + .repeated => { + try out_stream.writeAll("(repeated) "); + val = val.castTag(.repeated).?.data; }, .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), @@ -274,40 +282,24 @@ pub const Value = struct { /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
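// A note on the mechanical rename that dominates these hunks: direct
// `val.ip_index` reads become `val.toIntern()` calls, as the rewritten
// `toAllocatedBytes` just below illustrates. The accessor itself is not shown
// in this patch; a minimal definition consistent with its call sites (which
// still check `val.ip_index != .none` before calling it) would presumably be:
pub fn toIntern(val: Value) InternPool.Index {
    assert(val.ip_index != .none); // only interned values carry a pool index
    return val.ip_index;
}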
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { - switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => { - const bytes = val.castTag(.bytes).?.data; - const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null); - const adjusted_bytes = bytes[0..adjusted_len]; - return allocator.dupe(u8, adjusted_bytes); - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return allocator.dupe(u8, bytes); - }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), + .ptr => |ptr| switch (ptr.len) { + .none => unreachable, + else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), - .ptr => |ptr| switch (ptr.len) { - .none => unreachable, - else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => |bytes| try allocator.dupe(u8, bytes), - .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), - .repeated_elem => |elem| { - const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); - @memset(result, byte); - return result; - }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try allocator.dupe(u8, bytes), + .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + @memset(result, byte); + return result; }, - else => unreachable, }, - } + else => unreachable, + }; } fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { @@ -320,13 +312,13 @@ pub const Value = struct { } pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { - if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index); + if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), ty.toIntern()); switch (val.tag()) { .aggregate => { const old_elems = val.castTag(.aggregate).?.data; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); defer mod.gpa.free(new_elems); - const ty_key = mod.intern_pool.indexToKey(ty.ip_index); + const ty_key = mod.intern_pool.indexToKey(ty.toIntern()); for (new_elems, old_elems, 0..) 
|*new_elem, old_elem, field_i| new_elem.* = try old_elem.intern(switch (ty_key) { .struct_type => ty.structFieldType(field_i, mod), @@ -335,14 +327,14 @@ pub const Value = struct { else => unreachable, }, mod); return mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = new_elems }, } }); }, .@"union" => { const pl = val.castTag(.@"union").?.data; return mod.intern(.{ .un = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .tag = try pl.tag.intern(ty.unionTagTypeHypothetical(mod), mod), .val = try pl.val.intern(ty.unionFieldType(pl.tag, mod), mod), } }); @@ -353,13 +345,15 @@ pub const Value = struct { pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value { if (val.ip_index == .none) return val; - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| return Tag.bytes.create(arena, try arena.dupe(u8, bytes)), .elems => |old_elems| { const new_elems = try arena.alloc(Value, old_elems.len); for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); return Tag.aggregate.create(arena, new_elems); }, + .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()), }, else => return val, } @@ -372,40 +366,38 @@ pub const Value = struct { /// Asserts that the value is representable as a type. pub fn toType(self: Value) Type { - return self.ip_index.toType(); + return self.toIntern().toType(); } pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { const ip = &mod.intern_pool; - switch (val.ip_index) { - else => return switch (ip.indexToKey(ip.typeOf(val.ip_index))) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_literal => |enum_literal| { - const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; - return switch (ip.indexToKey(ty.ip_index)) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_type => |enum_type| if (enum_type.values.len != 0) - enum_type.values[field_index].toValue() - else // Field index and integer values are the same. - mod.intValue(enum_type.tag_ty.toType(), field_index), - else => unreachable, - }; - }, - .enum_type => |enum_type| (try ip.getCoerced( - mod.gpa, - val.ip_index, - enum_type.tag_ty, - )).toValue(), - else => unreachable, + return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_literal => |enum_literal| { + const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; + return switch (ip.indexToKey(ty.toIntern())) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + enum_type.values[field_index].toValue() + else // Field index and integer values are the same. 
+ mod.intValue(enum_type.tag_ty.toType(), field_index), + else => unreachable, + }; }, - } + .enum_type => |enum_type| (try ip.getCoerced( + mod.gpa, + val.toIntern(), + enum_type.tag_ty, + )).toValue(), + else => unreachable, + }; } pub fn tagName(val: Value, mod: *Module) []const u8 { const ip = &mod.intern_pool; - const enum_tag = switch (ip.indexToKey(val.ip_index)) { + const enum_tag = switch (ip.indexToKey(val.toIntern())) { .un => |un| ip.indexToKey(un.tag).enum_tag, .enum_tag => |x| x, .enum_literal => |name| return ip.stringToSlice(name), @@ -413,7 +405,7 @@ pub const Value = struct { }; const enum_type = ip.indexToKey(enum_tag.ty).enum_type; const field_index = field_index: { - const field_index = enum_type.tagValueIndex(ip, val.ip_index).?; + const field_index = enum_type.tagValueIndex(ip, val.toIntern()).?; break :field_index @intCast(u32, field_index); }; const field_name = enum_type.names[field_index]; @@ -432,12 +424,12 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), .undef => unreachable, .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), .int => |int| switch (int.storage) { .u64, .i64, .big_int => int.storage.toBigInt(space), @@ -475,18 +467,18 @@ pub const Value = struct { } pub fn getFunctionIndex(val: Value, mod: *Module) Module.Fn.OptionalIndex { - return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.ip_index) else .none; + return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.toIntern()) else .none; } pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { .extern_func => |extern_func| extern_func, else => null, } else null; } pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable, else => null, } else null; @@ -501,11 +493,11 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
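// The integer accessors that follow share one dispatch idiom: statically
// known InternPool indices (`bool_false`, `bool_true`, `undef`) are handled
// directly on the index, and everything else falls through to a full
// `indexToKey` lookup. A sketch of the shape, assuming `val: Value` and
// `mod: *Module` are in scope:
const as_u64: ?u64 = switch (val.toIntern()) {
    .bool_false => 0,
    .bool_true => 1,
    .undef => unreachable,
    else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
        .int => |int| switch (int.storage) {
            .u64 => |x| x,
            else => null,
        },
        else => null,
    },
};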
pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, .undef => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, @@ -531,11 +523,11 @@ pub const Value = struct { /// Asserts the value is an integer and it fits in a i64 pub fn toSignedInt(val: Value, mod: *Module) i64 { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, .undef => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, @@ -549,7 +541,7 @@ pub const Value = struct { } pub fn toBool(val: Value, _: *const Module) bool { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_true => true, .bool_false => false, else => unreachable, @@ -558,7 +550,7 @@ pub const Value = struct { fn isDeclRef(val: Value, mod: *Module) bool { var check = val; - while (true) switch (mod.intern_pool.indexToKey(check.ip_index)) { + while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => return true, .eu_payload, .opt_payload => |index| check = index.toValue(), @@ -644,7 +636,7 @@ pub const Value = struct { .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; - const name = switch (mod.intern_pool.indexToKey(val.ip_index)) { + const name = switch (mod.intern_pool.indexToKey(val.toIntern())) { .err => |err| err.name, .error_union => |error_union| error_union.val.err_name, else => unreachable, @@ -718,7 +710,7 @@ pub const Value = struct { if (abi_size == 0) return; if (abi_size <= @sizeOf(u64)) { - const ip_key = mod.intern_pool.indexToKey(int_val.ip_index); + const ip_key = mod.intern_pool.indexToKey(int_val.toIntern()); const int: u64 = switch (ip_key.int.storage) { .u64 => |x| x, .i64 => |x| @bitCast(u64, x), @@ -847,7 +839,7 @@ pub const Value = struct { } }, .Float => return (try mod.intern(.{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { 16 => .{ .f16 = @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian)) }, 32 => .{ .f32 = @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian)) }, @@ -860,13 +852,16 @@ pub const Value = struct { .Array => { const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); var offset: usize = 0; for (elems) |*elem| { - elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena); + elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod); offset += @intCast(usize, elem_size); } - return Tag.aggregate.create(arena, elems); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue(); }, .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes @@ -878,13 +873,16 @@ pub const Value = struct { .Auto => unreachable, // Sema is supposed to have emitted a compile 
error already .Extern => { const fields = ty.structFields(mod).values(); - const field_vals = try arena.alloc(Value, fields.len); - for (fields, 0..) |field, i| { + const field_vals = try arena.alloc(InternPool.Index, fields.len); + for (field_vals, fields, 0..) |*field_val, field, i| { const off = @intCast(usize, ty.structFieldOffset(i, mod)); - const sz = @intCast(usize, ty.structFieldType(i, mod).abiSize(mod)); - field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena); + const sz = @intCast(usize, field.ty.abiSize(mod)); + field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod); } - return Tag.aggregate.create(arena, field_vals); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); }, .Packed => { const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; @@ -897,7 +895,7 @@ pub const Value = struct { const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); const name = mod.error_name_list.items[@intCast(usize, int)]; return (try mod.intern(.{ .err = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .name = mod.intern_pool.getString(name).unwrap().?, } })).toValue(); }, @@ -961,7 +959,7 @@ pub const Value = struct { } }, .Float => return (try mod.intern(.{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { 16 => .{ .f16 = @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian)) }, 32 => .{ .f32 = @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian)) }, @@ -973,17 +971,20 @@ pub const Value = struct { } })).toValue(), .Vector => { const elem_ty = ty.childType(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); var bits: u16 = 0; const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); for (elems, 0..) |_, i| { // On big-endian systems, LLVM reverses the element order of vectors by default const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i; - elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena); + elems[tgt_elem_i] = try (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).intern(elem_ty, mod); bits += elem_bit_size; } - return Tag.aggregate.create(arena, elems); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue(); }, .Struct => switch (ty.containerLayout(mod)) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already @@ -991,13 +992,16 @@ pub const Value = struct { .Packed => { var bits: u16 = 0; const fields = ty.structFields(mod).values(); - const field_vals = try arena.alloc(Value, fields.len); + const field_vals = try arena.alloc(InternPool.Index, fields.len); for (fields, 0..) |field, i| { const field_bits = @intCast(u16, field.ty.bitSize(mod)); - field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena); + field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod); bits += field_bits; } - return Tag.aggregate.create(arena, field_vals); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); }, }, .Pointer => { @@ -1015,7 +1019,7 @@ pub const Value = struct { /// Asserts that the value is a float or an integer. 
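// The Array and Extern-struct branches above show how deserialized aggregates
// now collect `InternPool.Index` elements instead of arena-allocated `Value`s:
// each element is read, immediately re-interned at its element type, and the
// indices are wrapped in one interned `.aggregate`. A sketch of the idiom,
// assuming `ty`, `elem_ty: Type`, `elem_size: usize`, `buffer: []const u8`,
// `arena`, and `mod` are in scope:
const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod)));
var offset: usize = 0;
for (elems) |*elem| {
    elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod);
    offset += elem_size;
}
return (try mod.intern(.{ .aggregate = .{
    .ty = ty.toIntern(),
    .storage = .{ .elems = elems },
} })).toValue();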
pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), inline .u64, .i64 => |x| { @@ -1119,7 +1123,7 @@ pub const Value = struct { pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value { const target = mod.getTarget(); return (try mod.intern(.{ .float = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .storage = switch (dest_ty.floatBits(target)) { 16 => .{ .f16 = self.toFloat(f16, mod) }, 32 => .{ .f32 = self.toFloat(f32, mod) }, @@ -1133,7 +1137,7 @@ pub const Value = struct { /// Asserts the value is a float pub fn floatHasFraction(self: Value, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(self.ip_index)) { + return switch (mod.intern_pool.indexToKey(self.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| @rem(x, 1) != 0, }, @@ -1150,10 +1154,10 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { - return switch (lhs.ip_index) { + return switch (lhs.toIntern()) { .bool_false => .eq, .bool_true => .gt, - else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => .gt, .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), @@ -1212,8 +1216,8 @@ pub const Value = struct { const lhs_tag = lhs.tag(); const rhs_tag = rhs.tag(); if (lhs_tag == rhs_tag) { - const lhs_storage = mod.intern_pool.indexToKey(lhs.ip_index).float.storage; - const rhs_storage = mod.intern_pool.indexToKey(rhs.ip_index).float.storage; + const lhs_storage = mod.intern_pool.indexToKey(lhs.toIntern()).float.storage; + const rhs_storage = mod.intern_pool.indexToKey(rhs.toIntern()).float.storage; const lhs128: f128 = switch (lhs_storage) { inline else => |x| x, }; @@ -1336,46 +1340,20 @@ pub const Value = struct { } } - switch (lhs.ip_index) { - .none => switch (lhs.tag()) { - .aggregate => { - for (lhs.castTag(.aggregate).?.data) |elem_val| { - if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false; - } - return true; - }, - .str_lit => { - const str_lit = lhs.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - for (bytes) |byte| { - if (!std.math.compare(byte, op, 0)) return false; - } - return true; - }, - .bytes => { - const bytes = lhs.castTag(.bytes).?.data; - for (bytes) |byte| { - if (!std.math.compare(byte, op, 0)) return false; - } - return true; - }, - else => {}, + switch (mod.intern_pool.indexToKey(lhs.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| if (std.math.isNan(x)) return op == .neq, }, - else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { - .float => |float| switch (float.storage) { - inline else => |x| if (std.math.isNan(x)) return op == .neq, - }, - .aggregate => |aggregate| return switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |byte| { - if (!std.math.order(byte, 0).compare(op)) break false; - } else true, - .elems => |elems| for (elems) |elem| { - if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; - } else true, - .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), - }, - 
else => {}, + .aggregate => |aggregate| return switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| { + if (!std.math.order(byte, 0).compare(op)) break false; + } else true, + .elems => |elems| for (elems) |elem| { + if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + } else true, + .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), }, + else => {}, } return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); } @@ -1412,7 +1390,7 @@ pub const Value = struct { const b_field_vals = b.castTag(.aggregate).?.data; assert(a_field_vals.len == b_field_vals.len); - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |anon_struct| { assert(anon_struct.types.len == a_field_vals.len); for (anon_struct.types, 0..) |field_ty, i| { @@ -1577,7 +1555,7 @@ pub const Value = struct { // The InternPool data structure hashes based on Key to make interned objects // unique. An Index can be treated simply as u32 value for the // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, val.ip_index); + std.hash.autoHash(hasher, val.toIntern()); return; } const zig_ty_tag = ty.zigTypeTag(mod); @@ -1663,7 +1641,7 @@ pub const Value = struct { // The InternPool data structure hashes based on Key to make interned objects // unique. An Index can be treated simply as u32 value for the // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, val.ip_index); + std.hash.autoHash(hasher, val.toIntern()); return; } @@ -1703,7 +1681,7 @@ pub const Value = struct { }, .Union => { hasher.update(val.tagName(mod)); - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .un => |un| { const active_field_ty = ty.unionFieldType(un.tag.toValue(), mod); un.val.toValue().hashUncoerced(active_field_ty, hasher, mod); @@ -1746,7 +1724,7 @@ pub const Value = struct { }; pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .mut_decl, .comptime_field => true, .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod), @@ -1758,8 +1736,8 @@ pub const Value = struct { } pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool { - return val.isComptimeMutablePtr(mod) or switch (val.ip_index) { - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + return val.isComptimeMutablePtr(mod) or switch (val.toIntern()) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => false, .payload => |payload| payload.toValue().canMutateComptimeVarState(mod), @@ -1785,7 +1763,7 @@ pub const Value = struct { /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), /// this function returns null. 
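// Hashing also benefits from interning: because equal values are deduplicated
// to the same pool index, an interned value hashes by its 32-bit index alone,
// as the two `autoHash` hunks above show:
if (val.ip_index != .none) {
    std.hash.autoHash(hasher, val.toIntern());
    return;
}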
pub fn pointerDecl(val: Value, mod: *Module) ?Module.Decl.Index { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable.decl, .extern_func => |extern_func| extern_func.decl, .func => |func| mod.funcPtr(func.index).owner_decl, @@ -1811,35 +1789,19 @@ pub const Value = struct { pub const slice_len_index = 1; pub fn slicePtr(val: Value, mod: *Module) Value { - return mod.intern_pool.slicePtr(val.ip_index).toValue(); + return mod.intern_pool.slicePtr(val.toIntern()).toValue(); } pub fn sliceLen(val: Value, mod: *Module) u64 { - return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); + return mod.intern_pool.sliceLen(val.toIntern()).toValue().toUnsignedInt(mod); } /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { - switch (val.ip_index) { + switch (val.toIntern()) { .undef => return Value.undef, - .none => switch (val.tag()) { - .bytes => { - const byte = val.castTag(.bytes).?.data[index]; - return mod.intValue(Type.u8, byte); - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const byte = bytes[index]; - return mod.intValue(Type.u8, byte); - }, - - .aggregate => return val.castTag(.aggregate).?.data[index], - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), @@ -1871,26 +1833,26 @@ pub const Value = struct { } pub fn isLazyAlign(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| int.storage == .lazy_align, else => false, }; } pub fn isLazySize(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| int.storage == .lazy_size, else => false, }; } pub fn isRuntimeValue(val: Value, mod: *Module) bool { - return mod.intern_pool.indexToKey(val.ip_index) == .runtime_value; + return mod.intern_pool.indexToKey(val.toIntern()) == .runtime_value; } /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => true, .ptr => |ptr| switch (ptr.addr) { .decl => |decl_index| { @@ -1913,7 +1875,7 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable.is_threadlocal, .ptr => |ptr| switch (ptr.addr) { .decl => |decl_index| { @@ -1943,55 +1905,30 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - 
return Tag.str_lit.create(arena, .{ - .index = @intCast(u32, str_lit.index + start), - .len = @intCast(u32, end - start), - }); - }, + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), + .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), else => unreachable, }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), - .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), - .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), - .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), - else => unreachable, + .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ + .ty = mod.intern_pool.typeOf(val.toIntern()), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[start..end] }, + .elems => |elems| .{ .elems = elems[start..end] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, }, - .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ - .ty = mod.intern_pool.typeOf(val.ip_index), - .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[start..end] }, - .elems => |elems| .{ .elems = elems[start..end] }, - .repeated_elem => |elem| .{ .repeated_elem = elem }, - }, - } })).toValue(), - else => unreachable, - }, + } })).toValue(), + else => unreachable, }; } pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { - switch (val.ip_index) { + switch (val.toIntern()) { .undef => return Value.undef, - .none => switch (val.tag()) { - .aggregate => { - const field_values = val.castTag(.aggregate).?.data; - return field_values[index]; - }, - .@"union" => { - const payload = val.castTag(.@"union").?.data; - // TODO assert the tag is correct - return payload.val; - }, - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try mod.intern(.{ .int = .{ .ty = .u8_type, @@ -2000,6 +1937,8 @@ pub const Value = struct { .elems => |elems| elems[index], .repeated_elem => |elem| elem, }.toValue(), + // TODO assert the tag is correct + .un => |un| un.val.toValue(), else => unreachable, }, } @@ -2007,7 +1946,7 @@ pub const Value = struct { pub fn unionTag(val: Value, mod: *Module) Value { if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef, .enum_tag => val, .un => |un| un.tag.toValue(), else => unreachable, @@ -2022,12 +1961,12 @@ pub const Value = struct { mod: *Module, ) Allocator.Error!Value { const elem_ty = ty.elemType2(mod); - const ptr_val = switch (mod.intern_pool.indexToKey(val.ip_index)) { + const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| ptr: { switch (ptr.addr) { .elem => |elem| if 
(mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) return (try mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .elem = .{ .base = elem.base, .index = elem.index + index, @@ -2043,9 +1982,9 @@ pub const Value = struct { else => val, }; return (try mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .elem = .{ - .base = ptr_val.ip_index, + .base = ptr_val.toIntern(), .index = index, } }, } })).toValue(); @@ -2053,7 +1992,7 @@ pub const Value = struct { pub fn isUndef(val: Value, mod: *Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => true, .simple_value => |v| v == .undefined, else => false, @@ -2070,15 +2009,9 @@ pub const Value = struct { /// Returns true if any value contained in `self` is undefined. pub fn anyUndef(val: Value, mod: *Module) !bool { if (val.ip_index == .none) return false; - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => true, - .none => switch (val.tag()) { - .aggregate => for (val.castTag(.aggregate).?.data) |field| { - if (try field.anyUndef(mod)) break true; - } else false, - else => false, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => true, .simple_value => |v| v == .undefined, .ptr => |ptr| switch (ptr.len) { @@ -2098,13 +2031,13 @@ pub const Value = struct { /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. pub fn isNull(val: Value, mod: *Module) bool { - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => unreachable, .unreachable_value => unreachable, .null_value => true, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => { var buf: BigIntSpace = undefined; return val.toBigInt(&buf, mod).eqZero(); @@ -2120,7 +2053,7 @@ pub const Value = struct { /// something is an error or not because it works without having to figure out the /// string. pub fn getError(self: Value, mod: *const Module) ?[]const u8 { - return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.ip_index)) { + return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.toIntern())) { .err => |err| err.name.toOptional(), .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| err_name.toOptional(), @@ -2133,12 +2066,12 @@ pub const Value = struct { /// Assumes the type is an error union. Returns true if and only if the value is /// the error union payload, not an error. pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool { - return mod.intern_pool.indexToKey(val.ip_index).error_union.val == .payload; + return mod.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload; } /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(val.ip_index).opt.val) { + return switch (mod.intern_pool.indexToKey(val.toIntern()).opt.val) { .none => null, else => |index| index.toValue(), }; @@ -2146,14 +2079,9 @@ pub const Value = struct { /// Valid for all types. Asserts the value is not undefined. 
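// From here down, every vectorized arithmetic helper follows the same
// mechanical rewrite: allocate a slice of `InternPool.Index`, intern each
// scalar result at the scalar type, and wrap the indices in one interned
// aggregate. A generic sketch, where `op` is a stand-in for any of the
// `*Scalar` helpers (e.g. `intMulScalar`) and `arena` for the function's
// allocator parameter:
if (ty.zigTypeTag(mod) == .Vector) {
    const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
    const scalar_ty = ty.scalarType(mod);
    for (result_data, 0..) |*scalar, i| {
        const lhs_elem = try lhs.elemValue(mod, i);
        const rhs_elem = try rhs.elemValue(mod, i);
        scalar.* = try (try op(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
    }
    return (try mod.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .elems = result_data },
    } })).toValue();
}
return op(lhs, rhs, ty, arena, mod);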
pub fn isFloat(self: Value, mod: *const Module) bool { - return switch (self.ip_index) { + return switch (self.toIntern()) { .undef => unreachable, - .none => switch (self.tag()) { - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - else => false, - }, - else => switch (mod.intern_pool.indexToKey(self.ip_index)) { + else => switch (mod.intern_pool.indexToKey(self.toIntern())) { .float => true, else => false, }, @@ -2169,21 +2097,24 @@ pub const Value = struct { pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, int_ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try intToFloatScalar(elem_val, scalar_ty, mod, opt_sema); + scalar.* = try (try intToFloatScalar(elem_val, scalar_ty, mod, opt_sema)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intToFloatScalar(val, float_ty, mod, opt_sema); } pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => val, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| { const float = bigIntToFloat(big_int.limbs, big_int.positive); @@ -2217,7 +2148,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .storage = storage, } })).toValue(); } @@ -2245,14 +2176,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); + scalar.* = try (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intAddSatScalar(lhs, rhs, ty, arena, mod); } @@ -2292,14 +2226,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); + scalar.* = try (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intSubSatScalar(lhs, rhs, ty, arena, mod); } @@ -2338,19 +2275,26 @@ mod: *Module, ) !OverflowArithmeticResult { if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod)); - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const vec_len = ty.vectorLen(mod); + const overflowed_data = try arena.alloc(InternPool.Index, vec_len); + const result_data = try arena.alloc(InternPool.Index, vec_len); const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { + for (overflowed_data, result_data, 0..) |*of, *scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); - overflowed_data[i] = of_math_result.overflow_bit; - scalar.* = of_math_result.wrapped_result; + of.* = try of_math_result.overflow_bit.intern(Type.bool, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return OverflowArithmeticResult{ - .overflow_bit = try Value.Tag.aggregate.create(arena, overflowed_data), - .wrapped_result = try Value.Tag.aggregate.create(arena, result_data), + .overflow_bit = (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = overflowed_data }, + } })).toValue(), + .wrapped_result = (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(), }; } return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod); @@ -2400,13 +2344,17 @@ mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + scalar.* = try (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return numberMulWrapScalar(lhs, rhs, ty, arena, mod); } @@ -2442,13 +2390,17 @@ mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..)
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + scalar.* = try (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intMulSatScalar(lhs, rhs, ty, arena, mod); } @@ -2515,12 +2467,16 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod); + scalar.* = try (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseNotScalar(val, ty, arena, mod); } @@ -2552,13 +2508,17 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); + scalar.* = try (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseAndScalar(lhs, rhs, ty, allocator, mod); } @@ -2586,13 +2546,17 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + scalar.* = try (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseNandScalar(lhs, rhs, ty, arena, mod); } @@ -2609,13 +2573,17 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); + scalar.* = try (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseOrScalar(lhs, rhs, ty, allocator, mod); } @@ -2642,14 +2610,17 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseXorScalar(lhs, rhs, ty, allocator, mod); } @@ -2676,14 +2647,17 @@ pub const Value = struct { pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intDivScalar(lhs, rhs, ty, allocator, mod); } @@ -2715,14 +2689,17 @@ pub const Value = struct { pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intDivFloorScalar(lhs, rhs, ty, allocator, mod); } @@ -2754,14 +2731,17 @@ pub const Value = struct { pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intModScalar(lhs, rhs, ty, allocator, mod); } @@ -2794,7 +2774,7 @@ pub const Value = struct { /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. pub fn isNan(val: Value, mod: *const Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| std.math.isNan(x), }, @@ -2805,7 +2785,7 @@ pub const Value = struct { /// Returns true if the value is a floating point type and is infinite. Returns false otherwise. 
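// Scalar float results all funnel through one interning idiom: select a
// storage variant by bit width, then intern a `.float` key at the result
// type. A sketch mirroring `floatRemScalar` below (the 64/80/128 arms are
// assumed symmetric with the 16/32 arms visible in these hunks):
const target = mod.getTarget();
return (try mod.intern(.{ .float = .{
    .ty = float_type.toIntern(),
    .storage = switch (float_type.floatBits(target)) {
        16 => .{ .f16 = @rem(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) },
        32 => .{ .f32 = @rem(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) },
        64 => .{ .f64 = @rem(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) },
        80 => .{ .f80 = @rem(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) },
        128 => .{ .f128 = @rem(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) },
        else => unreachable,
    },
} })).toValue();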
pub fn isInf(val: Value, mod: *const Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| std.math.isInf(x), }, @@ -2815,7 +2795,7 @@ pub const Value = struct { pub fn isNegativeInf(val: Value, mod: *const Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| std.math.isNegativeInf(x), }, @@ -2825,13 +2805,17 @@ pub const Value = struct { pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatRemScalar(lhs, rhs, float_type, mod); } @@ -2847,20 +2831,24 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatModScalar(lhs, rhs, float_type, mod); } @@ -2876,21 +2864,24 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intMulScalar(lhs, rhs, ty, allocator, mod); } @@ -2918,13 +2909,16 @@ pub const Value = struct { pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intTruncScalar(val, ty, allocator, signedness, bits, mod); } @@ -2939,14 +2933,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); const bits_elem = try bits.elemValue(mod, i); - scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); } @@ -2976,14 +2973,17 @@ pub const Value = struct { pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return shlScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -3015,18 +3015,26 @@ pub const Value = struct {
         mod: *Module,
     ) !OverflowArithmeticResult {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod));
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
-            for (result_data, 0..) |*scalar, i| {
+            const vec_len = ty.vectorLen(mod);
+            const overflowed_data = try allocator.alloc(InternPool.Index, vec_len);
+            const result_data = try allocator.alloc(InternPool.Index, vec_len);
+            const scalar_ty = ty.scalarType(mod);
+            for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
-                overflowed_data[i] = of_math_result.overflow_bit;
-                scalar.* = of_math_result.wrapped_result;
+                const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                of.* = try of_math_result.overflow_bit.intern(Type.u1, mod);
+                scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
             }
             return OverflowArithmeticResult{
-                .overflow_bit = try Value.Tag.aggregate.create(allocator, overflowed_data),
-                .wrapped_result = try Value.Tag.aggregate.create(allocator, result_data),
+                .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                    .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                    .storage = .{ .elems = overflowed_data },
+                } })).toValue(),
+                .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                    .ty = ty.toIntern(),
+                    .storage = .{ .elems = result_data },
+                } })).toValue(),
             };
         }
         return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod);
@@ -3071,13 +3079,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                scalar.* = try (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
            }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return shlSatScalar(lhs, rhs, ty, arena, mod);
     }
@@ -3117,13 +3129,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..)
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + scalar.* = try (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return shlTruncScalar(lhs, rhs, ty, arena, mod); } @@ -3143,14 +3159,17 @@ pub const Value = struct { pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return shrScalar(lhs, rhs, ty, allocator, mod); } @@ -3193,12 +3212,16 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try floatNegScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatNegScalar(val, float_type, mod); } @@ -3218,7 +3241,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3231,13 +3254,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatAddScalar(lhs, rhs, float_type, mod); } @@ -3258,7 +3285,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3271,13 +3298,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatSubScalar(lhs, rhs, float_type, mod); } @@ -3298,7 +3329,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3311,13 +3342,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatDivScalar(lhs, rhs, float_type, mod); } @@ -3338,7 +3373,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3351,13 +3386,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatDivFloorScalar(lhs, rhs, float_type, mod); } @@ -3378,7 +3417,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3391,13 +3430,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatDivTruncScalar(lhs, rhs, float_type, mod); } @@ -3418,7 +3461,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3431,13 +3474,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatMulScalar(lhs, rhs, float_type, mod); } @@ -3458,19 +3505,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try sqrtScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sqrtScalar(val, float_type, mod); } @@ -3486,19 +3537,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try sinScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sinScalar(val, float_type, mod); } @@ -3514,19 +3569,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try cosScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return cosScalar(val, float_type, mod); } @@ -3542,19 +3601,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try tanScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return tanScalar(val, float_type, mod); } @@ -3570,19 +3633,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try expScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try expScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return expScalar(val, float_type, mod); } @@ -3598,19 +3665,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try exp2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return exp2Scalar(val, float_type, mod); } @@ -3626,19 +3697,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try logScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try logScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return logScalar(val, float_type, mod); } @@ -3654,19 +3729,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try log2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return log2Scalar(val, float_type, mod); } @@ -3682,19 +3761,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try log10Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return log10Scalar(val, float_type, mod); } @@ -3710,19 +3793,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try fabsScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return fabsScalar(val, float_type, mod); } @@ -3738,19 +3825,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try floorScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floorScalar(val, float_type, mod); } @@ -3766,19 +3857,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try ceilScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return ceilScalar(val, float_type, mod); } @@ -3794,19 +3889,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try roundScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return roundScalar(val, float_type, mod); } @@ -3822,19 +3921,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try truncScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return truncScalar(val, float_type, mod); } @@ -3850,7 +3953,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3864,20 +3967,18 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const mulend1_elem = try mulend1.elemValue(mod, i); const mulend2_elem = try mulend2.elemValue(mod, i); const addend_elem = try addend.elemValue(mod, i); - scalar.* = try mulAddScalar( - float_type.scalarType(mod), - mulend1_elem, - mulend2_elem, - addend_elem, - mod, - ); + scalar.* = try (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return mulAddScalar(float_type, mulend1, mulend2, addend, mod); } @@ -3899,7 +4000,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3931,22 +4032,22 @@ pub const Value = struct { } pub fn isGenericPoison(val: Value) bool { - return val.ip_index == .generic_poison; + return val.toIntern() == .generic_poison; } /// This type is not copyable since it may contain pointers to its inner data. pub const Payload = struct { tag: Tag, - pub const Bytes = struct { + pub const SubValue = struct { base: Payload, - /// Includes the sentinel, if any. - data: []const u8, + data: Value, }; - pub const StrLit = struct { + pub const Bytes = struct { base: Payload, - data: Module.StringLiteralContext.Key, + /// Includes the sentinel, if any. 
+ data: []const u8, }; pub const Aggregate = struct { From 72e4ea38216aab7e7ed05978d04c5d32de44b5ce Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 25 May 2023 07:08:48 -0400 Subject: [PATCH 097/205] InternPool: fix crashes up to in progress comptime mutation --- src/InternPool.zig | 45 +++++++++----- src/Sema.zig | 140 +++++++++++++++++++++---------------------- src/codegen/llvm.zig | 15 ++++- src/value.zig | 23 ++++++- 4 files changed, 132 insertions(+), 91 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 1dc43a467d22..429b86a8a69b 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -730,21 +730,11 @@ pub const Key = union(enum) { switch (aggregate.storage) { .bytes => unreachable, - .elems => |elems| { - var buffer: Key.Int.Storage.BigIntSpace = undefined; - for (elems) |elem| std.hash.autoHash( - hasher, - ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable, - ); - }, + .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem), .repeated_elem => |elem| { const len = ip.aggregateTypeLen(aggregate.ty); - var buffer: Key.Int.Storage.BigIntSpace = undefined; - const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable; var i: u64 = 0; - while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + while (i < len) : (i += 1) std.hash.autoHash(hasher, elem); }, } }, @@ -2044,6 +2034,10 @@ pub const Alignment = enum(u6) { assert(n != 0); return fromByteUnits(n); } + + pub fn min(a: Alignment, b: Alignment) Alignment { + return @intToEnum(Alignment, @min(@enumToInt(a), @enumToInt(b))); + } }; /// Used for non-sentineled arrays that have length fitting in u32, as well as @@ -3514,16 +3508,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const ty_key = ip.indexToKey(aggregate.ty); const aggregate_len = ip.aggregateTypeLen(aggregate.ty); switch (aggregate.storage) { - .bytes => { + .bytes => |bytes| { assert(ty_key.array_type.child == .u8_type); + assert(bytes.len == aggregate_len); }, .elems => |elems| { assert(elems.len == aggregate_len); - for (elems) |elem| assert(elem != .none); }, - .repeated_elem => |elem| { - assert(elem != .none); + .repeated_elem => {}, + } + switch (ty_key) { + inline .array_type, .vector_type => |seq_type| { + for (aggregate.storage.values()) |elem| { + assert(ip.typeOf(elem) == seq_type.child); + } + }, + .struct_type => |struct_type| { + for ( + aggregate.storage.values(), + ip.structPtrUnwrapConst(struct_type.index).?.fields.values(), + ) |elem, field| { + assert(ip.typeOf(elem) == field.ty.toIntern()); + } + }, + .anon_struct_type => |anon_struct_type| { + for (aggregate.storage.values(), anon_struct_type.types) |elem, ty| { + assert(ip.typeOf(elem) == ty); + } }, + else => unreachable, } if (aggregate_len == 0) { diff --git a/src/Sema.zig b/src/Sema.zig index 4478f26bf45e..a7416af28642 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -10252,7 +10252,7 @@ fn zirSwitchCapture( if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| { return sema.addConstant( first_field.ty, - operand_val.castTag(.@"union").?.data.val, + mod.intern_pool.indexToKey(operand_val.toIntern()).un.val.toValue(), ); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -19042,10 +19042,10 @@ fn zirReify( const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstValue(block, operand_src, 
type_info, "operand to @Type must be comptime-known"); - const union_val = val.cast(Value.Payload.Union).?.data; + const union_val = mod.intern_pool.indexToKey(val.toIntern()).un; const target = mod.getTarget(); - const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?; - if (try union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src); + if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src); + const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?; const ip = &mod.intern_pool; switch (@intToEnum(std.builtin.TypeId, tag_index)) { .Type => return Air.Inst.Ref.type_type, @@ -19059,9 +19059,9 @@ fn zirReify( .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const signedness_val = try union_val.val.fieldValue(mod, fields.getIndex("signedness").?); - const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const signedness_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("signedness").?); + const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("bits").?); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -19069,9 +19069,9 @@ fn zirReify( return sema.addType(ty); }, .Vector => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); - const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("len").?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); @@ -19085,8 +19085,8 @@ fn zirReify( return sema.addType(ty); }, .Float => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("bits").?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { @@ -19100,15 +19100,15 @@ fn zirReify( return sema.addType(ty); }, .Pointer => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const size_val = try union_val.val.fieldValue(mod, fields.getIndex("size").?); - const is_const_val = try union_val.val.fieldValue(mod, fields.getIndex("is_const").?); - const is_volatile_val = try union_val.val.fieldValue(mod, fields.getIndex("is_volatile").?); - const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); - const address_space_val = try union_val.val.fieldValue(mod, fields.getIndex("address_space").?); - const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); - const is_allowzero_val = try union_val.val.fieldValue(mod, fields.getIndex("is_allowzero").?); - const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); + const fields = 
ip.typeOf(union_val.val).toType().structFields(mod); + const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("size").?); + const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_const").?); + const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_volatile").?); + const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("alignment").?); + const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("address_space").?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); + const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_allowzero").?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("sentinel").?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19191,10 +19191,10 @@ fn zirReify( return sema.addType(ty); }, .Array => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); - const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); - const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("len").?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("sentinel").?); const len = len_val.toUnsignedInt(mod); const child_ty = child_val.toType(); @@ -19210,8 +19210,8 @@ fn zirReify( return sema.addType(ty); }, .Optional => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); const child_ty = child_val.toType(); @@ -19219,9 +19219,9 @@ fn zirReify( return sema.addType(ty); }, .ErrorUnion => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const error_set_val = try union_val.val.fieldValue(mod, fields.getIndex("error_set").?); - const payload_val = try union_val.val.fieldValue(mod, fields.getIndex("payload").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("error_set").?); + const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("payload").?); const error_set_ty = error_set_val.toType(); const payload_ty = payload_val.toType(); @@ -19234,7 +19234,7 @@ fn zirReify( return sema.addType(ty); }, .ErrorSet => { - const payload_val = union_val.val.optionalValue(mod) orelse + const payload_val = union_val.val.toValue().optionalValue(mod) orelse return sema.addType(Type.anyerror); const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod)); @@ -19258,12 +19258,12 @@ fn zirReify( return sema.addType(ty); }, .Struct => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); - const 
backing_integer_val = try union_val.val.fieldValue(mod, fields.getIndex("backing_integer").?); - const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); - const is_tuple_val = try union_val.val.fieldValue(mod, fields.getIndex("is_tuple").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("layout").?); + const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("backing_integer").?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); + const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_tuple").?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -19279,11 +19279,11 @@ fn zirReify( return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool(mod)); }, .Enum => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); - const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); - const is_exhaustive_val = try union_val.val.fieldValue(mod, fields.getIndex("is_exhaustive").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("tag_type").?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); + const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_exhaustive").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19318,7 +19318,7 @@ fn zirReify( .nonexhaustive else .explicit, - .tag_ty = int_tag_ty.ip_index, + .tag_ty = int_tag_ty.toIntern(), }); errdefer mod.intern_pool.remove(incomplete_enum.index); @@ -19360,7 +19360,7 @@ fn zirReify( return sema.failWithOwnedErrorMsg(msg); } - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, value_val.ip_index)) |other| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, value_val.toIntern())) |other| { const msg = msg: { const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)}); errdefer msg.destroy(gpa); @@ -19375,8 +19375,8 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Opaque => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19419,11 +19419,11 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Union => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); - const tag_type_val = try 
union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); - const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("layout").?); + const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("tag_type").?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19491,7 +19491,7 @@ fn zirReify( if (tag_type_val.optionalValue(mod)) |payload_val| { union_obj.tag_ty = payload_val.toType(); - const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { + const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.toIntern())) { .enum_type => |x| x, else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}), }; @@ -19620,13 +19620,13 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Fn => { - const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); - const calling_convention_val = try union_val.val.fieldValue(mod, fields.getIndex("calling_convention").?); - const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); - const is_generic_val = try union_val.val.fieldValue(mod, fields.getIndex("is_generic").?); - const is_var_args_val = try union_val.val.fieldValue(mod, fields.getIndex("is_var_args").?); - const return_type_val = try union_val.val.fieldValue(mod, fields.getIndex("return_type").?); - const params_val = try union_val.val.fieldValue(mod, fields.getIndex("params").?); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("calling_convention").?); + const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("alignment").?); + const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_generic").?); + const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_var_args").?); + const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("return_type").?); + const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("params").?); const is_generic = is_generic_val.toBool(mod); if (is_generic) { @@ -25204,12 +25204,12 @@ fn unionFieldPtr( if (union_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); } - const tag_and_val = union_val.castTag(.@"union").?.data; + const un = mod.intern_pool.indexToKey(union_val.toIntern()).un; const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); + const tag_matches = un.tag == field_tag.toIntern(); if (!tag_matches) { const msg = msg: { - const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?; + const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); errdefer 
msg.destroy(sema.gpa); @@ -25269,16 +25269,16 @@ fn unionFieldVal( if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); - const tag_and_val = union_val.castTag(.@"union").?.data; + const un = mod.intern_pool.indexToKey(union_val.toIntern()).un; const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); + const tag_matches = un.tag == field_tag.toIntern(); switch (union_obj.layout) { .Auto => { if (tag_matches) { - return sema.addConstant(field.ty, tag_and_val.val); + return sema.addConstant(field.ty, un.val.toValue()); } else { const msg = msg: { - const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?; + const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); errdefer msg.destroy(sema.gpa); @@ -25290,10 +25290,10 @@ fn unionFieldVal( }, .Packed, .Extern => { if (tag_matches) { - return sema.addConstant(field.ty, tag_and_val.val); + return sema.addConstant(field.ty, un.val.toValue()); } else { - const old_ty = union_ty.unionFieldType(tag_and_val.tag, mod); - if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| { + const old_ty = union_ty.unionFieldType(un.tag.toValue(), mod); + if (try sema.bitCastVal(block, src, un.val.toValue(), old_ty, field.ty, 0)) |new_val| { return sema.addConstant(field.ty, new_val); } } @@ -27626,7 +27626,7 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { // allocations is relevant to this function, or why it would have // different behavior depending on whether the types were inferred. // Something seems wrong here. 
- switch (prev_ptr_ty.ip_index) { + switch (prev_ptr_ty.toIntern()) { .inferred_alloc_mut_type, .inferred_alloc_const_type => return null, else => {}, } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index e54b951aa604..0c12faf75183 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3208,7 +3208,8 @@ pub const DeclGen = struct { return llvm_type.getUndef(); } - switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { + const val_key = mod.intern_pool.indexToKey(tv.val.toIntern()); + switch (val_key) { .int_type, .ptr_type, .array_type, @@ -3242,10 +3243,18 @@ pub const DeclGen = struct { }, }, .variable, - .extern_func, - .func, .enum_literal, => unreachable, // non-runtime values + .extern_func, .func => { + const fn_decl_index = switch (val_key) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + else => unreachable, + }; + const fn_decl = dg.module.declPtr(fn_decl_index); + dg.module.markDeclAlive(fn_decl); + return dg.resolveLlvmFunction(fn_decl_index); + }, .int => |int| { var bigint_space: Value.BigIntSpace = undefined; const bigint = int.storage.toBigInt(&bigint_space); diff --git a/src/value.zig b/src/value.zig index ef3a3f6be18d..02f4422dda0b 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1961,12 +1961,31 @@ pub const Value = struct { mod: *Module, ) Allocator.Error!Value { const elem_ty = ty.elemType2(mod); + const ptr_ty_key = mod.intern_pool.indexToKey(ty.toIntern()).ptr_type; + assert(ptr_ty_key.host_size == 0); + assert(ptr_ty_key.bit_offset == 0); + assert(ptr_ty_key.vector_index == .none); + const elem_alignment = InternPool.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); + const alignment = switch (ptr_ty_key.alignment) { + .none => .none, + else => ptr_ty_key.alignment.min( + @intToEnum(InternPool.Alignment, @ctz(index * elem_ty.abiSize(mod))), + ), + }; + const ptr_ty = try mod.ptrType(.{ + .elem_type = elem_ty.toIntern(), + .alignment = if (alignment == elem_alignment) .none else alignment, + .is_const = ptr_ty_key.is_const, + .is_volatile = ptr_ty_key.is_volatile, + .is_allowzero = ptr_ty_key.is_allowzero, + .address_space = ptr_ty_key.address_space, + }); const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| ptr: { switch (ptr.addr) { .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) return (try mod.intern(.{ .ptr = .{ - .ty = ty.toIntern(), + .ty = ptr_ty.toIntern(), .addr = .{ .elem = .{ .base = elem.base, .index = elem.index + index, @@ -1982,7 +2001,7 @@ pub const Value = struct { else => val, }; return (try mod.intern(.{ .ptr = .{ - .ty = ty.toIntern(), + .ty = ptr_ty.toIntern(), .addr = .{ .elem = .{ .base = ptr_val.toIntern(), .index = index, From 70cc68e9994f7dca53904075e15b2b6f87342539 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 25 May 2023 19:23:01 -0400 Subject: [PATCH 098/205] Air: remove constant tag Some uses have been moved to their own tag, the rest use interned. Also, finish porting comptime mutation to be more InternPool aware. 
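
For a sense of what this means for consumers of `Air`, here is a minimal
illustrative sketch (not part of the diff below; `constantValue` is a
hypothetical helper written for this message, and the real lookup lives
in `Air.value`):

    // A comptime-known value is no longer fetched from the side `values`
    // array through a `constant` instruction's `ty_pl` payload; the
    // `interned` tag carries the InternPool index directly in its data.
    fn constantValue(air: Air, inst: Air.Inst.Index) ?Value {
        const datas = air.instructions.items(.data);
        return switch (air.instructions.items(.tag)[inst]) {
            .interned => datas[inst].interned.toValue(),
            else => null, // runtime-known; no comptime value to return
        };
    }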
--- src/Air.zig | 18 +- src/InternPool.zig | 18 +- src/Liveness.zig | 22 +- src/Liveness/Verify.zig | 40 +- src/Module.zig | 9 +- src/Sema.zig | 1245 ++++++++++++++++----------------- src/TypedValue.zig | 54 ++ src/arch/aarch64/CodeGen.zig | 12 +- src/arch/arm/CodeGen.zig | 12 +- src/arch/riscv64/CodeGen.zig | 12 +- src/arch/sparc64/CodeGen.zig | 12 +- src/arch/wasm/CodeGen.zig | 5 +- src/arch/x86_64/CodeGen.zig | 19 +- src/codegen/c.zig | 5 +- src/codegen/llvm.zig | 3 +- src/codegen/spirv.zig | 1 - src/print_air.zig | 12 +- src/value.zig | 198 ++++-- tools/lldb_pretty_printers.py | 6 + 19 files changed, 863 insertions(+), 840 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 4f36cf8bc138..95ed7d33f164 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -186,6 +186,14 @@ pub const Inst = struct { /// Allocates stack local memory. /// Uses the `ty` field. alloc, + /// This is a special value that tracks a set of types that have been stored + /// to an inferred allocation. It does not support any of the normal value queries. + /// Uses the `ty_pl` field, payload is an index of `values` array. + inferred_alloc, + /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc + /// instructions for comptime code. + /// Uses the `ty_pl` field, payload is an index of `values` array. + inferred_alloc_comptime, /// If the function will pass the result by-ref, this instruction returns the /// result pointer. Otherwise it is equivalent to `alloc`. /// Uses the `ty` field. @@ -397,9 +405,6 @@ pub const Inst = struct { /// was executed on the operand. /// Uses the `ty_pl` field. Payload is `TryPtr`. try_ptr, - /// A comptime-known value. Uses the `ty_pl` field, payload is index of - /// `values` array. - constant, /// A comptime-known value via an index into the InternPool. /// Uses the `interned` field. 
interned, @@ -1265,7 +1270,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .assembly, .block, - .constant, .struct_field_ptr, .struct_field_val, .slice_elem_ptr, @@ -1283,6 +1287,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .sub_with_overflow, .mul_with_overflow, .shl_with_overflow, + .inferred_alloc, + .inferred_alloc_comptime, .ptr_add, .ptr_sub, .try_ptr, @@ -1495,7 +1501,6 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index); const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { - .constant => return air.values[air_datas[inst_index].ty_pl.payload], .interned => return air_datas[inst_index].interned.toValue(), else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } @@ -1603,6 +1608,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { .mul_with_overflow, .shl_with_overflow, .alloc, + .inferred_alloc, + .inferred_alloc_comptime, .ret_ptr, .bit_and, .bit_or, @@ -1651,7 +1658,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { .cmp_neq_optimized, .cmp_vector, .cmp_vector_optimized, - .constant, .interned, .is_null, .is_non_null, diff --git a/src/InternPool.zig b/src/InternPool.zig index 429b86a8a69b..dfde35260034 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -515,10 +515,12 @@ pub const Key = union(enum) { pub const ErrorUnion = struct { ty: Index, - val: union(enum) { + val: Value, + + pub const Value = union(enum) { err_name: NullTerminatedString, payload: Index, - }, + }; }; pub const EnumTag = struct { @@ -1068,7 +1070,7 @@ pub const Key = union(enum) { .false, .true => .bool_type, .empty_struct => .empty_struct_type, .@"unreachable" => .noreturn_type, - .generic_poison => unreachable, + .generic_poison => .generic_poison_type, }, }; } @@ -2671,6 +2673,10 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .only_possible_value => { const ty = @intToEnum(Index, data); return switch (ip.indexToKey(ty)) { + .array_type, .vector_type => .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = &.{} }, + } }, // TODO: migrate structs to properly use the InternPool rather // than using the SegmentedList trick, then the struct type will // have a slice of comptime values that can be used here for when @@ -3184,7 +3190,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ - .tag = .ptr_elem, + .tag = switch (ptr.addr) { + .elem => .ptr_elem, + .field => .ptr_field, + else => unreachable, + }, .data = try ip.addExtra(gpa, PtrBaseIndex{ .ty = ptr.ty, .base = base_index.base, diff --git a/src/Liveness.zig b/src/Liveness.zig index c30708e1400f..4f3d87d3c200 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -321,8 +321,9 @@ pub fn categorizeOperand( .arg, .alloc, + .inferred_alloc, + .inferred_alloc_comptime, .ret_ptr, - .constant, .interned, .trap, .breakpoint, @@ -973,9 +974,7 @@ fn analyzeInst( .work_group_id, => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }), - .constant, - .interned, - => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .trap, .unreach, @@ -1269,10 +1268,7 @@ fn analyzeOperands( const operand = Air.refToIndexAllowNone(op_ref) orelse continue; // Don't 
compute any liveness for constants
-                switch (inst_tags[operand]) {
-                    .constant, .interned => continue,
-                    else => {},
-                }
+                if (inst_tags[operand] == .interned) continue;
 
                 _ = try data.live_set.put(gpa, operand, {});
             }
@@ -1305,10 +1301,7 @@ fn analyzeOperands(
             const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
 
             // Don't compute any liveness for constants
-            switch (inst_tags[operand]) {
-                .constant, .interned => continue,
-                else => {},
-            }
+            if (inst_tags[operand] == .interned) continue;
 
             const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
@@ -1839,10 +1832,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
             // Don't compute any liveness for constants
             const inst_tags = big.a.air.instructions.items(.tag);
-            switch (inst_tags[operand]) {
-                .constant, .interned => return,
-                else => {},
-            }
+            if (inst_tags[operand] == .interned) return;
 
             // If our result is unused and the instruction doesn't need to be lowered, backends will
             // skip the lowering of this instruction, so we don't want to record uses of operands.
diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig
index 703d561559d3..cbb7f9f1435b 100644
--- a/src/Liveness/Verify.zig
+++ b/src/Liveness/Verify.zig
@@ -41,8 +41,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             // no operands
             .arg,
             .alloc,
+            .inferred_alloc,
+            .inferred_alloc_comptime,
             .ret_ptr,
-            .constant,
             .interned,
             .breakpoint,
             .dbg_stmt,
@@ -554,16 +555,18 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err
 }
 
 fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void {
-    const operand = Air.refToIndexAllowNone(op_ref) orelse return;
-    switch (self.air.instructions.items(.tag)[operand]) {
-        .constant, .interned => {},
-        else => {
-            if (dies) {
-                if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
-            } else {
-                if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
-            }
-        },
+    const operand = Air.refToIndexAllowNone(op_ref) orelse {
+        assert(!dies);
+        return;
+    };
+    if (self.air.instructions.items(.tag)[operand] == .interned) {
+        assert(!dies);
+        return;
+    }
+    if (dies) {
+        if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
+    } else {
+        if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
     }
 }
 
@@ -576,16 +579,11 @@ fn verifyInst(
         const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index));
         try self.verifyOperand(inst, operand, dies);
     }
-    const tag = self.air.instructions.items(.tag);
-    switch (tag[inst]) {
-        .constant, .interned => unreachable,
-        else => {
-            if (self.liveness.isUnused(inst)) {
-                assert(!self.live.contains(inst));
-            } else {
-                try self.live.putNoClobber(self.gpa, inst, {});
-            }
-        },
+    if (self.air.instructions.items(.tag)[inst] == .interned) return;
+    if (self.liveness.isUnused(inst)) {
+        assert(!self.live.contains(inst));
+    } else {
+        try self.live.putNoClobber(self.gpa, inst, {});
     }
 }
diff --git a/src/Module.zig b/src/Module.zig
index 47f7643b9fe7..d3045631c5f7 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -764,14 +764,7 @@ pub const Decl = struct {
     pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
         if (!decl.has_tv) return error.AnalysisFail;
-        return TypedValue{
-            .ty = decl.ty,
-            .val = decl.val,
-        };
-    }
-
-    pub fn value(decl: *Decl) error{AnalysisFail}!Value {
-
return (try decl.typedValue()).val; + return TypedValue{ .ty = decl.ty, .val = decl.val }; } pub fn isFunction(decl: Decl, mod: *const Module) !bool { diff --git a/src/Sema.zig b/src/Sema.zig index a7416af28642..55adc2fffbbc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1991,23 +1991,21 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( const i = int - InternPool.static_len; const air_tags = sema.air_instructions.items(.tag); if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| { - if (air_tags[i] == .constant) { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - const val = sema.air_values.items[ty_pl.payload]; + if (air_tags[i] == .interned) { + const interned = sema.air_instructions.items(.data)[i].interned; + const val = interned.toValue(); if (val.getVariable(sema.mod) != null) return val; } return opv; } const air_datas = sema.air_instructions.items(.data); switch (air_tags[i]) { - .constant => { - const ty_pl = air_datas[i].ty_pl; - const val = sema.air_values.items[ty_pl.payload]; + .interned => { + const val = air_datas[i].interned.toValue(); if (val.isRuntimeValue(sema.mod)) make_runtime.* = true; if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, - .interned => return air_datas[i].interned.toValue(), else => return null, } } @@ -2440,64 +2438,64 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const addr_space = target_util.defaultAddressSpace(target, .local); if (Air.refToIndex(ptr)) |ptr_inst| { - if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { - .inferred_alloc => { - const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; - // Add the stored instruction to the set we will use to resolve peer types - // for the inferred allocation. - // This instruction will not make it to codegen; it is only to participate - // in the `stored_inst_list` of the `inferred_alloc`. - var trash_block = block.makeSubBlock(); - defer trash_block.instructions.deinit(sema.gpa); - const operand = try trash_block.addBitCast(pointee_ty, .void_value); - - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = inferred_alloc.alignment, - .@"addrspace" = addr_space, - }); - const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); + switch (sema.air_instructions.items(.tag)[ptr_inst]) { + .inferred_alloc => { + const air_datas = sema.air_instructions.items(.data); + const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; + const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; + // Add the stored instruction to the set we will use to resolve peer types + // for the inferred allocation. + // This instruction will not make it to codegen; it is only to participate + // in the `stored_inst_list` of the `inferred_alloc`. 
+ var trash_block = block.makeSubBlock(); + defer trash_block.instructions.deinit(sema.gpa); + const operand = try trash_block.addBitCast(pointee_ty, .void_value); + + const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + .pointee_type = pointee_ty, + .@"align" = inferred_alloc.alignment, + .@"addrspace" = addr_space, + }); + const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); - try inferred_alloc.prongs.append(sema.arena, .{ - .stored_inst = operand, - .placeholder = Air.refToIndex(bitcasted_ptr).?, - }); + try inferred_alloc.prongs.append(sema.arena, .{ + .stored_inst = operand, + .placeholder = Air.refToIndex(bitcasted_ptr).?, + }); - return bitcasted_ptr; - }, - .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; - // There will be only one coerce_result_ptr because we are running at comptime. - // The alloc will turn into a Decl. - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - iac.data.decl_index = try anon_decl.finish( - pointee_ty, - Value.undef, - iac.data.alignment, - ); - if (iac.data.alignment != 0) { - try sema.resolveTypeLayout(pointee_ty); - } - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = iac.data.alignment, - .@"addrspace" = addr_space, - }); - try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); - return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ - .ty = ptr_ty.toIntern(), - .addr = .{ .mut_decl = .{ - .decl = iac.data.decl_index, - .runtime_index = block.runtime_index, - } }, - } })).toValue()); - }, - else => {}, - } + return bitcasted_ptr; + }, + .inferred_alloc_comptime => { + const air_datas = sema.air_instructions.items(.data); + const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; + const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + // There will be only one coerce_result_ptr because we are running at comptime. + // The alloc will turn into a Decl. 
+ var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + iac.data.decl_index = try anon_decl.finish( + pointee_ty, + Value.undef, + iac.data.alignment, + ); + if (iac.data.alignment != 0) { + try sema.resolveTypeLayout(pointee_ty); + } + const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + .pointee_type = pointee_ty, + .@"align" = iac.data.alignment, + .@"addrspace" = addr_space, + }); + try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); + return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_ty.toIntern(), + .addr = .{ .mut_decl = .{ + .decl = iac.data.decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); + }, + else => {}, } } @@ -3458,6 +3456,7 @@ fn zirAllocExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node }; const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node }; @@ -3487,13 +3486,19 @@ fn zirAllocExtended( if (small.has_type) { return sema.analyzeComptimeAlloc(block, var_ty, alignment); } else { - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = alignment, - }), - ); + const ty_inst = try sema.addType(inferred_alloc_ty); + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + .decl_index = undefined, + .alignment = alignment, + })); + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } } @@ -3511,17 +3516,19 @@ fn zirAllocExtended( return block.addTy(.alloc, ptr_type); } - // `Sema.addConstant` does not add the instruction to the block because it is - // not needed in the case of constant values. However here, we plan to "downgrade" - // to a normal instruction when we hit `resolve_inferred_alloc`. So we append - // to the block even though it is currently a `.constant`. 
- const result = try sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }), - ); - try block.instructions.append(sema.gpa, Air.refToIndex(result).?); - try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {}); - return result; + const ty_inst = try sema.addType(inferred_alloc_ty); + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc.create(sema.arena, .{ + .alignment = alignment, + })); + const result_index = try block.addInstAsIndex(.{ + .tag = .inferred_alloc, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, {}); + return Air.indexToRef(result_index); } fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3616,16 +3623,24 @@ fn zirAllocInferredComptime( inst: Zir.Inst.Index, inferred_alloc_ty: Type, ) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = 0, - }), - ); + + const ty_inst = try sema.addType(inferred_alloc_ty); + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + .decl_index = undefined, + .alignment = 0, + })); + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3676,31 +3691,39 @@ fn zirAllocInferred( const tracy = trace(@src()); defer tracy.end(); + const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; + const ty_inst = try sema.addType(inferred_alloc_ty); if (block.is_comptime) { - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = 0, - }), - ); + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + .decl_index = undefined, + .alignment = 0, + })); + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } - // `Sema.addConstant` does not add the instruction to the block because it is - // not needed in the case of constant values. However here, we plan to "downgrade" - // to a normal instruction when we hit `resolve_inferred_alloc`. So we append - // to the block even though it is currently a `.constant`. 
- const result = try sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }), - ); - try block.instructions.append(sema.gpa, Air.refToIndex(result).?); - try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {}); - return result; + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc.create(sema.arena, .{ + .alignment = 0, + })); + const result_index = try block.addInstAsIndex(.{ + .tag = .inferred_alloc, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, {}); + return Air.indexToRef(result_index); } fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -3712,7 +3735,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_inst = Air.refToIndex(ptr).?; - assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload; const ptr_val = sema.air_values.items[value_index]; const var_is_mut = switch (sema.typeOf(ptr).toIntern()) { @@ -3722,7 +3744,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com }; const target = sema.mod.getTarget(); - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { const iac = ptr_val.castTag(.inferred_alloc_comptime).?; const decl_index = iac.data.decl_index; @@ -3767,7 +3789,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Detect if the value is comptime-known. 
In such case, the // last 3 AIR instructions of the block will look like this: // - // %a = constant + // %a = interned // %b = bitcast(%a) // %c = store(%b, %d) // @@ -3814,7 +3836,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const candidate = block.instructions.items[search_index]; switch (air_tags[candidate]) { .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue, - .constant => break candidate, + .interned => break candidate, else => break :ct, } }; @@ -4981,15 +5003,15 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const src: LazySrcLoc = sema.src; blk: { const ptr_inst = Air.refToIndex(ptr) orelse break :blk; - if (sema.air_instructions.items(.tag)[ptr_inst] != .constant) break :blk; - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { + const air_data = sema.air_instructions.items(.data)[ptr_inst]; + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { + const ptr_val = sema.air_values.items[air_data.ty_pl.payload]; const iac = ptr_val.castTag(.inferred_alloc_comptime).?; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { + const ptr_val = sema.air_values.items[air_data.ty_pl.payload]; const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc); }, @@ -5009,11 +5031,10 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const ptr = try sema.resolveInst(bin_inst.lhs); const operand = try sema.resolveInst(bin_inst.rhs); const ptr_inst = Air.refToIndex(ptr).?; - assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { const iac = ptr_val.castTag(.inferred_alloc_comptime).?; return sema.storeToInferredAllocComptime(block, src, operand, iac); @@ -6988,16 +7009,7 @@ fn analyzeCall( const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| { - const ty_inst = try sema.addType(fn_ret_ty); - try sema.air_values.append(gpa, result.val); - sema.air_instructions.set(block_inst, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, - }); - break :res2 Air.indexToRef(block_inst); + break :res2 try sema.addConstant(fn_ret_ty, result.val); } } @@ -9407,7 +9419,7 @@ fn zirParam( if (is_comptime) { // If this is a comptime parameter we can add a constant generic_poison // since this is also a generic parameter. - const result = try sema.addConstant(param_ty, Value.generic_poison); + const result = try sema.addConstant(Type.generic_poison, Value.generic_poison); sema.inst_map.putAssumeCapacityNoClobber(inst, result); } else { // Otherwise we need a dummy runtime instruction. 
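[Note on reading the Sema.zig hunks above and below: they all lean on the new encoding of `Air.Inst.Ref`, visible in the `int - InternPool.static_len` arithmetic of `resolveMaybeUndefValAllowVariablesMaybeRuntime` and the `@enumToInt(val.toIntern()) < Air.ref_start_index` check in `addConstant`. The following is a minimal sketch of the Air.zig helpers this implies, assuming `ref_start_index` equals `InternPool.static_len` after this change; it is an editorial illustration, not a hunk of the patch.

    const InternPool = @import("InternPool.zig");

    /// Refs with integer values below this threshold refer directly to
    /// interned values; refs at or above it are instruction indices,
    /// shifted by the threshold.
    pub const ref_start_index: u32 = InternPool.static_len;

    pub fn indexToRef(inst: Inst.Index) Inst.Ref {
        return @intToEnum(Inst.Ref, ref_start_index + inst);
    }

    pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
        const ref_int = @enumToInt(inst);
        if (ref_int >= ref_start_index) {
            return ref_int - ref_start_index;
        } else {
            // The ref names a static InternPool entry, not an instruction.
            return null;
        }
    }

This is why `addConstant` can often emit no instruction at all: a well-known interned value already is a valid `Ref`.]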
@@ -15104,7 +15116,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, sema.mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -25378,8 +25390,8 @@ fn elemPtrOneLayerOnly( const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, index, mod); const result_ty = try sema.elemPtrType(indexable_ty, index); + const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod); return sema.addConstant(result_ty, elem_ptr); }; const result_ty = try sema.elemPtrType(indexable_ty, null); @@ -25424,8 +25436,9 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, index, mod); - if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { + const elem_ptr_ty = try sema.elemPtrType(indexable_ty, index); + const elem_ptr_val = try indexable_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } break :rs indexable_src; @@ -25684,7 +25697,7 @@ fn elemPtrArray( return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, index, mod); + const elem_ptr = try array_ptr_val.elemPtr(elem_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } @@ -25740,8 +25753,9 @@ fn elemValSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); - if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { + const elem_ptr_ty = try sema.elemPtrType(slice_ty, index); + const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } runtime_src = slice_src; @@ -25800,7 +25814,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); + const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25916,7 +25930,10 @@ fn coerceExtra( // null to ?T if (inst_ty.zigTypeTag(mod) == .Null) { - return sema.addConstant(dest_ty, Value.null); + return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ + .ty = dest_ty.toIntern(), + .val = .none, + } })).toValue()); } // cast from ?*T and ?[*]T to ?*anyopaque @@ -27665,43 +27682,40 @@ fn storePtrVal( switch (mut_kit.pointee) { .direct => |val_ptr| { if (mut_kit.mut_decl.runtime_index 
== .comptime_field_ptr) { - if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) { + if (!operand_val.eql(val_ptr.*, operand_ty, mod)) { // TODO use failWithInvalidComptimeFieldStore return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{}); } return; } - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); - - val_ptr.* = try operand_val.copy(arena); + val_ptr.* = (try operand_val.intern(operand_ty, mod)).toValue(); }, .reinterpret => |reinterpret| { const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { + reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + operand_val.writeToMemory(operand_ty, mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); + const arena = mut_kit.beginArena(mod); + defer mut_kit.finishArena(mod); - reinterpret.val_ptr.* = try Value.readFromMemory(mut_kit.ty, sema.mod, buffer, arena); + reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, arena)).intern(mut_kit.ty, mod)).toValue(); }, .bad_decl_ty, .bad_ptr_ty => { // TODO show the decl declaration site in a note and explain whether the decl // or the pointer is the problematic type - return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", .{mut_kit.ty.fmt(sema.mod)}); + return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", .{mut_kit.ty.fmt(mod)}); }, } } @@ -27754,7 +27768,7 @@ fn beginComptimePtrMutation( const mod = sema.mod; const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; switch (ptr.addr) { - .decl => unreachable, // isComptimeMutablePtr has been checked already + .decl, .int => unreachable, // isComptimeMutablePtr has been checked already .mut_decl => |mut_decl| { const decl = mod.declPtr(mut_decl.decl); return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl); @@ -27767,546 +27781,472 @@ fn beginComptimePtrMutation( .runtime_index = .comptime_field_ptr, }); }, - else => unreachable, - } - if (true) unreachable; - switch (ptr_val.toIntern()) { - .none => switch (ptr_val.tag()) { - 
.decl_ref_mut => { - const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; - const decl = sema.mod.declPtr(decl_ref_mut.decl_index); - return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); - }, - .comptime_field_ptr => { - const payload = ptr_val.castTag(.comptime_field_ptr).?.data; - const duped = try sema.arena.create(Value); - duped.* = payload.field_val; - return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ - .decl_index = @intToEnum(Module.Decl.Index, 0), - .runtime_index = .comptime_field_ptr, - }); - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); - - switch (parent.pointee) { - .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { - .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(mod); - if (elem_ptr.index >= check_len) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ - elem_ptr.index, check_len, - }); - } - const elem_ty = parent.ty.childType(mod); - - // We might have a pointer to multiple elements of the array (e.g. a pointer - // to a sub-array). In this case, we just have to reinterpret the relevant - // bytes of the whole array rather than any single element. - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return .{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - } + .eu_payload => |eu_ptr| { + const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.toValue(), eu_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.errorUnionPayload(mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the error union from `undef` to `opt_payload`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .eu_payload }, + .data = Value.undef, + }; - switch (val_ptr.toIntern()) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + val_ptr.* = Value.initPayload(&payload.base); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.undef); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = eu_ty, + }, + } + }, + .opt_payload => |opt_ptr| { + const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.toValue(), opt_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.optionalChild(mod); + switch (val_ptr.ip_index) { + .undef, .null_value => { + // An optional has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the optional from `undef` to `opt_payload`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .opt_payload }, + .data = Value.undef, + }; - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try mod.intValue(elem_ty, bytes[i]); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .str_lit => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `str_lit` tag, and handle it without making a call to this function. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (bytes, 0..) |byte, i| { - elems[i] = try mod.intValue(elem_ty, byte); - } - if (parent.ty.sentinel(mod)) |sent_val| { - assert(elems.len == bytes.len + 1); - elems[bytes.len] = sent_val; - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - if (elems.len > 0) elems[0] = repeated_val; - for (elems[1..]) |*elem| { - elem.* = try repeated_val.copy(arena); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ), + val_ptr.* = Value.initPayload(&payload.base); - .the_only_possible_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + }, + .none => switch (val_ptr.tag()) { + .opt_payload => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, - else => unreachable, - }, - else => unreachable, - } + else => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, + }, }, - else => { - if (elem_ptr.index != 0) { - // TODO include a "declared here" note for the decl - return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ - elem_ptr.index, - }); - } - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty, - val_ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ); + else => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, }, - }, - .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { - 
// Even though the parent value type has well-defined memory layout, our - // pointer type does not. - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = elem_ptr.elem_ty, + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = opt_ty, + }, + } + }, + .elem => |elem_ptr| { + const base_elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.base.toValue(), base_elem_ty); + + switch (parent.pointee) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { + .Array, .Vector => { + const check_len = parent.ty.arrayLenIncludingSentinel(mod); + if (elem_ptr.index >= check_len) { + // TODO have the parent include the decl so we can say "declared here" + return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ + elem_ptr.index, check_len, + }); + } + const elem_ty = parent.ty.childType(mod); + + // We might have a pointer to multiple elements of the array (e.g. a pointer + // to a sub-array). In this case, we just have to reinterpret the relevant + // bytes of the whole array rather than any single element. + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); + if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return .{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = val_ptr, + .byte_offset = elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, }; } - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); + switch (val_ptr.ip_index) { + .undef => { + // An array has been initialized to undefined at comptime and now we + // are for the first time setting an element. We must change the representation + // of the array from `undef` to `array`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| switch (val_ptr.toIntern()) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + @memset(elems, Value.undef); - switch (parent.ty.zigTypeTag(mod)) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(fields, Value.undef); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .none => switch (val_ptr.tag()) { + .bytes => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `bytes` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const bytes = val_ptr.castTag(.bytes).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + // bytes.len may be one greater than dest_len because of the case when + // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. + assert(bytes.len >= dest_len); + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (elems, 0..) |*elem, i| { + elem.* = try mod.intValue(elem_ty, bytes[i]); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); return beginComptimePtrMutationInner( sema, block, src, - parent.ty.structFieldType(field_index, mod), - &fields[field_index], + elem_ty, + &elems[elem_ptr.index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - const tag_ty = parent.ty.unionTagTypeHypothetical(mod); - payload.* = .{ .data = .{ - .tag = try mod.enumValueFieldIndex(tag_ty, field_index), - .val = Value.undef, - } }; + .repeated => { + // An array is memory-optimized to store only a single element value, and + // that value is understood to be the same for the entire length of the array. + // However, now we want to modify an individual field and so the + // representation has to change. If we wanted to avoid this, there would + // need to be special detection elsewhere to identify when writing a value to an + // array element that is stored using the `repeated` tag, and handle it + // without making a call to this function. 
+ const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + if (elems.len > 0) elems[0] = repeated_val; + for (elems[1..]) |*elem| { + elem.* = try repeated_val.copy(arena); + } - val_ptr.* = Value.initPayload(&payload.base); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); return beginComptimePtrMutationInner( sema, block, src, - parent.ty.structFieldType(field_index, mod), - &payload.data.val, + elem_ty, + &elems[elem_ptr.index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .Pointer => { - assert(parent.ty.isSlice(mod)); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.undef, - .len = Value.undef, - }); - - switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - else => unreachable, - } - }, + + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], + ptr_elem_ty, + parent.mut_decl, + ), + else => unreachable, - } - }, - .empty_struct => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - .repeated => { - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + }, + else => unreachable, + } + }, + else => { + if (elem_ptr.index != 0) { + // TODO include a "declared here" note for the decl + return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ + elem_ptr.index, + }); + } + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty, + val_ptr, + ptr_elem_ty, + parent.mut_decl, + ); + }, + }, + .reinterpret => |reinterpret| { + if (!base_elem_ty.hasWellDefinedLayout(mod)) { + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. 
+ return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = base_elem_ty, + }; + } - const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(elems, val_ptr.castTag(.repeated).?.data); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, + }; + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } + }, + .field => |field_ptr| { + const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + const field_index = @intCast(u32, field_ptr.index); + + var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty); + switch (parent.pointee) { + .direct => |val_ptr| switch (val_ptr.ip_index) { + .undef => { + // A struct or union has been initialized to undefined at comptime and now we + // are for the first time setting a field. We must change the representation + // of the struct/union from `undef` to `struct`/`union`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + switch (parent.ty.zigTypeTag(mod)) { + .Struct => { + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(fields, Value.undef); + + val_ptr.* = try Value.Tag.aggregate.create(arena, fields); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &elems[field_index], + &fields[field_index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .@"union" => { - // We need to set the active field of the union. 
- const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); + .Union => { + const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); + payload.* = .{ .data = .{ + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), + .val = Value.undef, + } }; - const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); + val_ptr.* = Value.initPayload(&payload.base); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &payload.val, + &payload.data.val, ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .slice => switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), + .Pointer => { + assert(parent.ty.isSlice(mod)); + val_ptr.* = try Value.Tag.slice.create(arena, .{ + .ptr = Value.undef, + .len = Value.undef, + }); - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), + switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), - else => unreachable, + else => unreachable, + } }, - else => unreachable, - }, - else => unreachable, + } }, - .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); - const field_offset = try sema.usizeCast(block, src, field_offset_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + field_offset, - } }, - .ty = parent.ty, - }; + .empty_struct => { + const duped = try sema.arena.create(Value); + duped.* = val_ptr.*; + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + duped, + ptr_elem_ty, + parent.mut_decl, + ); }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .eu_payload_ptr => { - const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(mod); - if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, - .ty = payload_ty, - }; - } else { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. 
+ .none => switch (val_ptr.tag()) { + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &val_ptr.castTag(.aggregate).?.data[field_index], + ptr_elem_ty, + parent.mut_decl, + ), + .repeated => { const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.undef, - }; + const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(elems, val_ptr.castTag(.repeated).?.data); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - val_ptr.* = Value.initPayload(&payload.base); + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &elems[field_index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .@"union" => { + // We need to set the active field of the union. + const union_tag_ty = base_child_ty.unionTagTypeHypothetical(mod); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = eu_ptr.container_ty, - }, - } - }, - .opt_payload_ptr => { - const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); - }; - var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.optionalChild(mod); - switch (val_ptr.toIntern()) { - .undef, .null_value => { - // An optional has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the optional from `undef` to `opt_payload`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const payload = &val_ptr.castTag(.@"union").?.data; + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .opt_payload }, - .data = Value.undef, - }; + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &payload.val, + ptr_elem_ty, + parent.mut_decl, + ); + }, + .slice => switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), - val_ptr.* = Value.initPayload(&payload.base); + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - .none => switch (val_ptr.tag()) { - .opt_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, + else => unreachable, + }, - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - }, - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. 
- .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = opt_ptr.container_ty, + else => unreachable, }, - } - }, - .decl_ref => unreachable, // isComptimeMutablePtr has been checked already - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr) { - else => unreachable, + else => unreachable, + }, + .reinterpret => |reinterpret| { + const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod); + const field_offset = try sema.usizeCast(block, src, field_offset_u64); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + field_offset, + } }, + .ty = parent.ty, + }; + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } }, } } @@ -28418,6 +28358,7 @@ fn beginComptimePtrLoad( .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }; + const is_mutable = ptr.addr == .mut_decl; const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); if (decl.getVariable(mod) != null) return error.RuntimeLoad; @@ -28426,7 +28367,7 @@ fn beginComptimePtrLoad( break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, - .is_mutable = false, + .is_mutable = is_mutable, .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, @@ -29411,7 +29352,7 @@ fn analyzeDeclVal( const decl_ref = try sema.analyzeDeclRefInner(decl_index, false); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (Air.refToIndex(result)) |index| { - if (sema.air_instructions.items(.tag)[index] == .constant and !block.is_typeof) { + if (sema.air_instructions.items(.tag)[index] == .interned and !block.is_typeof) { try sema.decl_val_table.put(sema.gpa, decl_index, result); } } @@ -30049,8 +29990,8 @@ fn analyzeSlice( const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sentinel_index, sema.mod); - const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); + const elem_ptr = try ptr_val.elemPtr(try sema.elemPtrType(new_ptr_ty, sentinel_index), sentinel_index, sema.mod); + const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, .val => |v| v, @@ -33421,35 +33362,24 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { } pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; - if (val.ip_index != .none) { - if (@enumToInt(val.toIntern()) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); - try sema.air_instructions.append(gpa, .{ - .tag = .interned, - .data = .{ .interned = val.toIntern() }, - }); - const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - // This assertion can be removed when the `ty` parameter is removed from - // this function thanks to the InternPool transition being complete. 
- if (std.debug.runtime_safety) { - const val_ty = sema.typeOf(result); - if (!Type.eql(val_ty, ty, sema.mod)) { - std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ - ty.fmt(sema.mod), val_ty.fmt(sema.mod), - }); - } + + // This assertion can be removed when the `ty` parameter is removed from + // this function thanks to the InternPool transition being complete. + if (std.debug.runtime_safety) { + const val_ty = mod.intern_pool.typeOf(val.toIntern()); + if (ty.toIntern() != val_ty) { + std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ + ty.fmt(mod), val_ty.toType().fmt(mod), + }); } - return result; } - const ty_inst = try sema.addType(ty); - try sema.air_values.append(gpa, val); + if (@enumToInt(val.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); try sema.air_instructions.append(gpa, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, + .tag = .interned, + .data = .{ .interned = val.toIntern() }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } @@ -33606,7 +33536,7 @@ pub fn analyzeAddressSpace( fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { const mod = sema.mod; const load_ty = ptr_ty.childType(mod); - const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true); + const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty); switch (res) { .runtime_load => return null, .val => |v| return v, @@ -33632,7 +33562,7 @@ const DerefResult = union(enum) { out_of_bounds: Type, }; -fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult { +fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type) CompileError!DerefResult { const mod = sema.mod; const target = mod.getTarget(); const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { @@ -33647,13 +33577,8 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value if (coerce_in_mem_ok) { // We have a Value that lines up in virtual memory exactly with what we want to load, // and it is in-memory coercible to load_ty. It may be returned without modifications. - if (deref.is_mutable and want_mutable) { - // The decl whose value we are obtaining here may be overwritten with - // a different value upon further semantic analysis, which would - // invalidate this memory. So we must copy here. - return DerefResult{ .val = try tv.val.copy(sema.arena) }; - } - return DerefResult{ .val = tv.val }; + // Move mutable decl values to the InternPool and assert other decls are already in the InternPool. + return .{ .val = (if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern()).toValue() }; } } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index d82fb72dea01..020686f86ee5 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -124,6 +124,60 @@ pub fn print( } return writer.writeAll(" }"); }, + .slice => { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + const payload = val.castTag(.slice).?.data; + const elem_ty = ty.elemType2(mod); + const len = payload.len.toUnsignedInt(mod); + + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @intCast(usize, std.math.min(len, max_string_len)); + var buf: [max_string_len]u8 = undefined; + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; + if (elem_val.isUndef(mod)) break :str; + buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; + } + + // TODO would be nice if this had a bit of unicode awareness. + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + + try writer.writeAll(".{ "); + + const max_len = std.math.min(len, max_aggregate_items); + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; + try print(.{ + .ty = elem_ty, + .val = elem_val, + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + }, + .eu_payload => { + val = val.castTag(.eu_payload).?.data; + ty = ty.errorUnionPayload(mod); + }, + .opt_payload => { + val = val.castTag(.opt_payload).?.data; + ty = ty.optionalChild(mod); + return print(.{ .ty = ty, .val = val }, writer, level, mod); + }, // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 16b103c898e4..54a34e8f09f4 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -845,8 +845,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -919,8 +918,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -6155,15 +6153,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { }); switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. 
const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index f0a44b72a858..8f1a8fdb670d 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -829,8 +829,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -903,8 +902,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -6103,15 +6101,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { }); switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 7f4715a4510c..660630503ddd 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -659,8 +659,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -730,8 +729,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. 
+ assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -2557,15 +2555,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { }); switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 9f44dc0e8a82..c7376a6eb7de 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -679,8 +679,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -4423,8 +4422,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -4553,15 +4551,15 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { if (Air.refToIndex(ref)) |inst| { switch (self.air.instructions.items(.tag)[inst]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. 
const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const interned = self.air.instructions.items(.data)[inst].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 85fc8346f837..b4e627e957a2 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -883,7 +883,7 @@ fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !B fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { const inst = Air.refToIndex(ref) orelse return; - if (func.air.instructions.items(.tag)[inst] == .constant) return; + assert(func.air.instructions.items(.tag)[inst] != .interned); // Branches are currently only allowed to free locals allocated // within their own branch. // TODO: Upon branch consolidation free any locals if needed. @@ -1832,8 +1832,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const air_tags = func.air.instructions.items(.tag); return switch (air_tags[inst]) { - .constant => unreachable, - .interned => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .add => func.airBinOp(inst, .add), .add_sat => func.airSatBinOp(inst, .add), diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index f2ac98584415..48504dee8fb5 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1922,8 +1922,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -2097,10 +2096,8 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - switch (self.air.instructions.items(.tag)[inst]) { - .constant => unreachable, - else => self.inst_tracking.getPtr(inst).?.die(self, inst), - } + assert(self.air.instructions.items(.tag)[inst] != .interned); + self.inst_tracking.getPtr(inst).?.die(self, inst); } /// Called when there are no operands, and the instruction is always unreferenced. 
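All of the backend changes above follow the same convention: `.interned` now carries an `InternPool.Index` directly in its data payload rather than a `ty_pl` payload indexing into `air.values`. As an illustrative sketch only (not part of any patch; `air: Air` and `inst: Air.Inst.Index` are assumed to be in scope), the round trip looks like this:

    // The `.interned` instruction stores only an InternPool.Index.
    const index: InternPool.Index = air.instructions.items(.data)[inst].interned;
    // Materialize a Value view on demand, e.g. to feed genTypedValue...
    const val: Value = index.toValue();
    // ...and map back to the pool index when interning or comparing.
    assert(val.toIntern() == index);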
@@ -2876,8 +2873,8 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { - .constant => { - const src_val = self.air.values[air_data[inst].ty_pl.payload]; + .interned => { + const src_val = air_data[inst].interned.toValue(); var space: Value.BigIntSpace = undefined; const src_int = src_val.toBigInt(&space, mod); return @intCast(u16, src_int.bitCountTwosComp()) + @@ -11584,11 +11581,11 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { if (Air.refToIndex(ref)) |inst| { const mcv = switch (self.air.instructions.items(.tag)[inst]) { - .constant => tracking: { + .interned => tracking: { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = (try self.air.value(ref, mod)).?, + .val = self.air.instructions.items(.data)[inst].interned.toValue(), })); break :tracking gop.value_ptr; }, @@ -11605,7 +11602,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { const tracking = switch (self.air.instructions.items(.tag)[inst]) { - .constant => &self.const_tracking, + .interned => &self.const_tracking, else => &self.inst_tracking, }.getPtr(inst).?; return switch (tracking.short) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 76533b4284a1..f97292e5102e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -2890,8 +2890,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const result_value = switch (air_tags[inst]) { // zig fmt: off - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .arg => try airArg(f, inst), @@ -7783,8 +7782,8 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void { const ref_inst = Air.refToIndex(ref) orelse return; + assert(f.air.instructions.items(.tag)[ref_inst] != .interned); const c_value = (f.value_map.fetchRemove(ref_inst) orelse return).value; - if (f.air.instructions.items(.tag)[ref_inst] == .constant) return; const local_index = switch (c_value) { .local, .new_local => |l| l, else => return, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0c12faf75183..46b126ad84db 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4530,8 +4530,7 @@ pub const FuncGen = struct { .vector_store_elem => try self.airVectorStoreElem(inst), - .constant => unreachable, - .interned => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.airUnreach(inst), .dbg_stmt => self.airDbgStmt(inst), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 2b04e03a5a6f..1a19bbdf91f7 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1807,7 +1807,6 @@ pub const DeclGen = struct { .br => return self.airBr(inst), .breakpoint => return, .cond_br => return self.airCondBr(inst), - .constant => unreachable, .dbg_stmt => return self.airDbgStmt(inst), .loop => return self.airLoop(inst), .ret => return self.airRet(inst), diff --git a/src/print_air.zig b/src/print_air.zig index 58e4029543cc..204f5ddeb991 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -93,14 +93,10 @@ const Writer = struct { fn 
writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void {
     for (w.air.instructions.items(.tag), 0..) |tag, i| {
+        if (tag != .interned) continue;
         const inst = @intCast(Air.Inst.Index, i);
-        switch (tag) {
-            .constant, .interned => {
-                try w.writeInst(s, inst);
-                try s.writeByte('\n');
-            },
-            else => continue,
-        }
+        try w.writeInst(s, inst);
+        try s.writeByte('\n');
     }
 }

@@ -304,7 +300,7 @@ const Writer = struct {
             .struct_field_ptr => try w.writeStructField(s, inst),
             .struct_field_val => try w.writeStructField(s, inst),

-            .constant => try w.writeConstant(s, inst),
+            .inferred_alloc, .inferred_alloc_comptime => try w.writeConstant(s, inst),
             .interned => try w.writeInterned(s, inst),
             .assembly => try w.writeAssembly(s, inst),
             .dbg_stmt => try w.writeDbgStmt(s, inst),
diff --git a/src/value.zig b/src/value.zig
index 02f4422dda0b..9244e33ad55c 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -35,6 +35,22 @@ pub const Value = struct {
         // The first section of this enum are tags that require no payload.
         // After this, the tag requires a payload.

+        /// When the type is error union:
+        /// * If the tag is `.@"error"`, the error union is an error.
+        /// * If the tag is `.eu_payload`, the error union is a payload.
+        /// * A nested error such as `anyerror!(anyerror!T)` in which the outer error union
+        ///   is non-error, but the inner error union is an error, is represented as
+        ///   a tag of `.eu_payload`, with a sub-tag of `.@"error"`.
+        eu_payload,
+        /// When the type is optional:
+        /// * If the tag is `.null_value`, the optional is null.
+        /// * If the tag is `.opt_payload`, the optional is a payload.
+        /// * A nested optional such as `??T` in which the outer optional
+        ///   is non-null, but the inner optional is null, is represented as
+        ///   a tag of `.opt_payload`, with a sub-tag of `.null_value`.
+        opt_payload,
+        /// Pointer and length as sub `Value` objects.
+        slice,
         /// A slice of u8 whose memory is managed externally.
         bytes,
         /// This value is repeated some number of times.
The amount of times to repeat
@@ -58,14 +74,16 @@ pub const Value = struct {

         pub fn Type(comptime t: Tag) type {
             return switch (t) {
-                .repeated => Payload.SubValue,
-
+                .eu_payload,
+                .opt_payload,
+                .repeated,
+                => Payload.SubValue,
+                .slice => Payload.Slice,
                 .bytes => Payload.Bytes,
-
-                .inferred_alloc => Payload.InferredAlloc,
-                .inferred_alloc_comptime => Payload.InferredAllocComptime,
                 .aggregate => Payload.Aggregate,
                 .@"union" => Payload.Union,
+                .inferred_alloc => Payload.InferredAlloc,
+                .inferred_alloc_comptime => Payload.InferredAllocComptime,
             };
         }

@@ -172,7 +190,10 @@ pub const Value = struct {
                     .legacy = .{ .ptr_otherwise = &new_payload.base },
                 };
             },
-            .repeated => {
+            .eu_payload,
+            .opt_payload,
+            .repeated,
+            => {
                 const payload = self.cast(Payload.SubValue).?;
                 const new_payload = try arena.create(Payload.SubValue);
                 new_payload.* = .{
@@ -184,6 +205,21 @@ pub const Value = struct {
                     .legacy = .{ .ptr_otherwise = &new_payload.base },
                 };
             },
+            .slice => {
+                const payload = self.castTag(.slice).?;
+                const new_payload = try arena.create(Payload.Slice);
+                new_payload.* = .{
+                    .base = payload.base,
+                    .data = .{
+                        .ptr = try payload.data.ptr.copy(arena),
+                        .len = try payload.data.len.copy(arena),
+                    },
+                };
+                return Value{
+                    .ip_index = .none,
+                    .legacy = .{ .ptr_otherwise = &new_payload.base },
+                };
+            },
             .aggregate => {
                 const payload = self.castTag(.aggregate).?;
                 const new_payload = try arena.create(Payload.Aggregate);
@@ -263,6 +299,15 @@ pub const Value = struct {
                     try out_stream.writeAll("(repeated) ");
                     val = val.castTag(.repeated).?.data;
                 },
+                .eu_payload => {
+                    try out_stream.writeAll("(eu_payload) ");
+                    val = val.castTag(.eu_payload).?.data;
+                },
+                .opt_payload => {
+                    try out_stream.writeAll("(opt_payload) ");
+                    val = val.castTag(.opt_payload).?.data;
+                },
+                .slice => return out_stream.writeAll("(slice)"),
                 .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"),
                 .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"),
             };
@@ -1653,13 +1698,18 @@ pub const Value = struct {
             .Null,
             .Struct, // It sure would be nice to do something clever with structs.
             => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag),
+            .Pointer => {
+                assert(ty.isSlice(mod));
+                const slice = val.castTag(.slice).?.data;
+                const ptr_ty = ty.slicePtrFieldType(mod);
+                slice.ptr.hashUncoerced(ptr_ty, hasher, mod);
+            },
             .Type,
             .Float,
             .ComptimeFloat,
             .Bool,
             .Int,
             .ComptimeInt,
-            .Pointer,
             .Fn,
             .Optional,
             .ErrorSet,
@@ -1799,9 +1849,15 @@ pub const Value = struct {
     /// Asserts the value is a single-item pointer to an array, or an array,
     /// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { - switch (val.toIntern()) { - .undef => return Value.undef, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { + return switch (val.ip_index) { + .undef => Value.undef, + .none => switch (val.tag()) { + .repeated => val.castTag(.repeated).?.data, + .aggregate => val.castTag(.aggregate).?.data[index], + .slice => val.castTag(.slice).?.data.ptr.elemValue(mod, index), + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), @@ -1829,7 +1885,7 @@ pub const Value = struct { }, else => unreachable, }, - } + }; } pub fn isLazyAlign(val: Value, mod: *Module) bool { @@ -1875,25 +1931,28 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| variable.is_threadlocal, - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl_index| { - const decl = mod.declPtr(decl_index); - assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); - }, - .mut_decl => |mut_decl| { - const decl = mod.declPtr(mut_decl.decl); - assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); + return switch (val.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| variable.is_threadlocal, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), + .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), }, - .int => false, - .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), - .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), - .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), + else => false, }, - else => false, }; } @@ -1926,9 +1985,21 @@ pub const Value = struct { } pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { - switch (val.toIntern()) { - .undef => return Value.undef, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { + return switch (val.ip_index) { + .undef => Value.undef, + .none => switch (val.tag()) { + .aggregate => { + const field_values = val.castTag(.aggregate).?.data; + return field_values[index]; + }, + .@"union" => { + const payload = val.castTag(.@"union").?.data; + // TODO assert the tag is correct + return payload.val; + }, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try mod.intern(.{ .int = .{ .ty = .u8_type, @@ -1941,7 +2012,7 @@ pub const Value = struct { .un => |un| un.val.toValue(), else => unreachable, }, - } + }; } pub fn unionTag(val: Value, mod: *Module) Value { @@ -1956,36 +2027,17 @@ pub const Value = struct { /// Returns a pointer to the element value at the index. 
pub fn elemPtr( val: Value, - ty: Type, + elem_ptr_ty: Type, index: usize, mod: *Module, ) Allocator.Error!Value { - const elem_ty = ty.elemType2(mod); - const ptr_ty_key = mod.intern_pool.indexToKey(ty.toIntern()).ptr_type; - assert(ptr_ty_key.host_size == 0); - assert(ptr_ty_key.bit_offset == 0); - assert(ptr_ty_key.vector_index == .none); - const elem_alignment = InternPool.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); - const alignment = switch (ptr_ty_key.alignment) { - .none => .none, - else => ptr_ty_key.alignment.min( - @intToEnum(InternPool.Alignment, @ctz(index * elem_ty.abiSize(mod))), - ), - }; - const ptr_ty = try mod.ptrType(.{ - .elem_type = elem_ty.toIntern(), - .alignment = if (alignment == elem_alignment) .none else alignment, - .is_const = ptr_ty_key.is_const, - .is_volatile = ptr_ty_key.is_volatile, - .is_allowzero = ptr_ty_key.is_allowzero, - .address_space = ptr_ty_key.address_space, - }); + const elem_ty = elem_ptr_ty.childType(mod); const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| ptr: { switch (ptr.addr) { .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) return (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.toIntern(), + .ty = elem_ptr_ty.toIntern(), .addr = .{ .elem = .{ .base = elem.base, .index = elem.index + index, @@ -2001,7 +2053,7 @@ pub const Value = struct { else => val, }; return (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.toIntern(), + .ty = elem_ptr_ty.toIntern(), .addr = .{ .elem = .{ .base = ptr_val.toIntern(), .index = index, @@ -4058,9 +4110,12 @@ pub const Value = struct { pub const Payload = struct { tag: Tag, - pub const SubValue = struct { + pub const Slice = struct { base: Payload, - data: Value, + data: struct { + ptr: Value, + len: Value, + }, }; pub const Bytes = struct { @@ -4069,6 +4124,11 @@ pub const Value = struct { data: []const u8, }; + pub const SubValue = struct { + base: Payload, + data: Value, + }; + pub const Aggregate = struct { base: Payload, /// Field values. The types are according to the struct or array type. 
@@ -4076,6 +4136,18 @@ pub const Value = struct { data: []Value, }; + pub const Union = struct { + pub const base_tag = Tag.@"union"; + + base: Payload = .{ .tag = base_tag }, + data: Data, + + pub const Data = struct { + tag: Value, + val: Value, + }; + }; + pub const InferredAlloc = struct { pub const base_tag = Tag.inferred_alloc; @@ -4110,18 +4182,6 @@ pub const Value = struct { alignment: u32, }, }; - - pub const Union = struct { - pub const base_tag = Tag.@"union"; - - base: Payload = .{ .tag = base_tag }, - data: Data, - - pub const Data = struct { - tag: Value, - val: Value, - }; - }; }; pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 555cda135d79..36e497afb9c5 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -682,4 +682,10 @@ def __lldb_init_module(debugger, _=None): add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True) add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True) add(debugger, category='zig.stage2', type='InternPool.Index', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Int.Storage', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.ErrorUnion.Value', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Float.Storage', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Ptr.Addr', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Aggregate.Storage', identifier='zig_TaggedUnion', synth=True) add(debugger, category='zig.stage2', type='arch.x86_64.CodeGen.MCValue', identifier='zig_TaggedUnion', synth=True, inline_children=True, summary=True) From 5d0d5893fd39047e4fdbb6623e4d69babf0b2ed4 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 25 May 2023 23:47:44 -0400 Subject: [PATCH 099/205] Sema: fix some issues with the inferred alloc tag change --- src/Sema.zig | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 55adc2fffbbc..6927a4bde9d8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1999,15 +1999,17 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( return opv; } const air_datas = sema.air_instructions.items(.data); - switch (air_tags[i]) { - .interned => { - const val = air_datas[i].interned.toValue(); - if (val.isRuntimeValue(sema.mod)) make_runtime.* = true; - if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; - return val; + const val = switch (air_tags[i]) { + .inferred_alloc, .inferred_alloc_comptime => val: { + const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; + break :val sema.air_values.items[ty_pl.payload]; }, + .interned => air_datas[i].interned.toValue(), else => return null, - } + }; + if (val.isRuntimeValue(sema.mod)) make_runtime.* = true; + if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; + return val; } fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc, reason: []const u8) CompileError { @@ -3762,13 +3764,17 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty 
= final_ptr_ty_inst;
         try sema.maybeQueueFuncBodyAnalysis(decl_index);
-        sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{
-            .ty = final_ptr_ty.toIntern(),
-            .addr = if (var_is_mut) .{ .mut_decl = .{
-                .decl = decl_index,
-                .runtime_index = block.runtime_index,
-            } } else .{ .decl = decl_index },
-        } })).toValue();
+        // Change it to an interned.
+        sema.air_instructions.set(ptr_inst, .{
+            .tag = .interned,
+            .data = .{ .interned = try sema.mod.intern(.{ .ptr = .{
+                .ty = final_ptr_ty.toIntern(),
+                .addr = if (var_is_mut) .{ .mut_decl = .{
+                    .decl = decl_index,
+                    .runtime_index = block.runtime_index,
+                } } else .{ .decl = decl_index },
+            } }) },
+        });
     },
     .inferred_alloc => {
         assert(sema.unresolved_inferred_allocs.remove(ptr_inst));

From f37c0a459382fa033cefc9bb139277436a78b25e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 25 May 2023 22:28:02 -0700
Subject: [PATCH 100/205] Sema: inferred allocations no longer abuse type/value system

Previously, inferred allocations had their own types and Value payloads
and a lot of special-case handling. Now the special casing is limited to
the AIR instructions for these use cases: instead of living in Value
payloads, the data is stored directly in AIR instruction data and in the
value type of the `unresolved_inferred_allocs` hash map, which was
previously `void`.
---
 src/Air.zig        |  36 ++++--
 src/InternPool.zig |  12 --
 src/Sema.zig       | 288 ++++++++++++++++++++------------------------
 src/TypedValue.zig |   3 -
 src/Zir.zig        |   2 -
 src/type.zig       |  36 +----
 src/value.zig      |  47 --------
 7 files changed, 160 insertions(+), 264 deletions(-)

diff --git a/src/Air.zig b/src/Air.zig
index 95ed7d33f164..e6cfc8c116ff 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -186,13 +186,17 @@ pub const Inst = struct {
         /// Allocates stack local memory.
         /// Uses the `ty` field.
         alloc,
-        /// This is a special value that tracks a set of types that have been stored
-        /// to an inferred allocation. It does not support any of the normal value queries.
-        /// Uses the `ty_pl` field, payload is an index of `values` array.
+        /// This special instruction only exists temporarily during semantic
+        /// analysis and is guaranteed to be unreachable in machine code
+        /// backends. It tracks a set of types that have been stored to an
+        /// inferred allocation.
+        /// Uses the `inferred_alloc` field.
         inferred_alloc,
-        /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc
-        /// instructions for comptime code.
-        /// Uses the `ty_pl` field, payload is an index of `values` array.
+        /// This special instruction only exists temporarily during semantic
+        /// analysis and is guaranteed to be unreachable in machine code
+        /// backends. Used to coordinate alloc_inferred, store_to_inferred_ptr,
+        /// and resolve_inferred_alloc instructions for comptime code.
+        /// Uses the `inferred_alloc_comptime` field.
         inferred_alloc_comptime,
         /// If the function will pass the result by-ref, this instruction returns the
         /// result pointer. Otherwise it is equivalent to `alloc`.
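For reference, a minimal sketch (not part of the patch) of how a consumer reads the inferred-allocation state straight out of the instruction, using the `Data` fields introduced just below; `air: Air` and `inst: Air.Inst.Index` are assumed to be in scope:

    switch (air.instructions.items(.tag)[inst]) {
        .inferred_alloc => {
            // The peer-type prongs live in Sema's unresolved_inferred_allocs map;
            // the instruction itself carries only alignment and constness.
            const ia = air.instructions.items(.data)[inst].inferred_alloc;
            _ = ia.alignment; // InternPool.Alignment
            _ = ia.is_const;
        },
        .inferred_alloc_comptime => {
            const iac = air.instructions.items(.data)[inst].inferred_alloc_comptime;
            _ = iac.decl_index; // set once the comptime allocation becomes a Decl
        },
        else => {},
    }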
@@ -908,8 +912,6 @@ pub const Inst = struct { slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), - inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), - inferred_alloc_mut_type = @enumToInt(InternPool.Index.inferred_alloc_mut_type), empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), undef = @enumToInt(InternPool.Index.undef), zero = @enumToInt(InternPool.Index.zero), @@ -997,6 +999,19 @@ pub const Inst = struct { // Index into a different array. payload: u32, }, + inferred_alloc_comptime: InferredAllocComptime, + inferred_alloc: InferredAlloc, + + pub const InferredAllocComptime = struct { + decl_index: Module.Decl.Index, + alignment: InternPool.Alignment, + is_const: bool, + }; + + pub const InferredAlloc = struct { + alignment: InternPool.Alignment, + is_const: bool, + }; // Make sure we don't accidentally add a field to make this union // bigger than expected. Note that in Debug builds, Zig is allowed @@ -1287,8 +1302,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .sub_with_overflow, .mul_with_overflow, .shl_with_overflow, - .inferred_alloc, - .inferred_alloc_comptime, .ptr_add, .ptr_sub, .try_ptr, @@ -1424,6 +1437,9 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .work_group_size, .work_group_id, => return Type.u32, + + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, } } diff --git a/src/InternPool.zig b/src/InternPool.zig index dfde35260034..f1bc4e3accdf 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1156,8 +1156,6 @@ pub const Index = enum(u32) { slice_const_u8_sentinel_0_type, anyerror_void_error_union_type, generic_poison_type, - inferred_alloc_const_type, - inferred_alloc_mut_type, /// `@TypeOf(.{})` empty_struct_type, @@ -1525,10 +1523,6 @@ pub const static_keys = [_]Key{ // generic_poison_type .{ .simple_type = .generic_poison }, - // inferred_alloc_const_type - .{ .simple_type = .inferred_alloc_const }, - // inferred_alloc_mut_type - .{ .simple_type = .inferred_alloc_mut }, // empty_struct_type .{ .anon_struct_type = .{ @@ -1958,12 +1952,6 @@ pub const SimpleType = enum(u32) { type_info, generic_poison, - /// TODO: remove this from `SimpleType`; instead make it only a special `Index` tag like - /// `var_args_param_type`. - inferred_alloc_const, - /// TODO: remove this from `SimpleType`; instead make it only a special `Index` tag like - /// `var_args_param_type`. - inferred_alloc_mut, }; pub const SimpleValue = enum(u32) { diff --git a/src/Sema.zig b/src/Sema.zig index 6927a4bde9d8..5395bb63d11a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -89,7 +89,9 @@ is_generic_instantiation: bool = false, /// function types will emit generic poison instead of a partial type. no_partial_func_ty: bool = false, -unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, +/// The temporary arena is used for the memory of the `InferredAlloc` values +/// here so the values can be dropped without any cleanup. +unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{}, const std = @import("std"); const math = std.math; @@ -718,7 +720,7 @@ pub const Block = struct { } /// `alignment` value of 0 means to use ABI alignment. 
- pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u32) !Decl.Index { + pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u64) !Decl.Index { const sema = wad.block.sema; // Do this ahead of time because `createAnonymousDecl` depends on calling // `type.hasRuntimeBits()`. @@ -728,7 +730,8 @@ pub const Block = struct { .val = val, }); const new_decl = sema.mod.declPtr(new_decl_index); - new_decl.@"align" = alignment; + // TODO: migrate Decl alignment to use `InternPool.Alignment` + new_decl.@"align" = @intCast(u32, alignment); errdefer sema.mod.abortAnonDecl(new_decl_index); try new_decl.finalizeNewArena(&wad.new_decl_arena); wad.finished = true; @@ -748,6 +751,23 @@ const LabeledBlock = struct { } }; +/// The value stored in the inferred allocation. This will go into +/// peer type resolution. This is stored in a separate list so that +/// the items are contiguous in memory and thus can be passed to +/// `Module.resolvePeerTypes`. +const InferredAlloc = struct { + prongs: std.MultiArrayList(struct { + /// The dummy instruction used as a peer to resolve the type. + /// Although this has a redundant type with placeholder, this is + /// needed in addition because it may be a constant value, which + /// affects peer type resolution. + stored_inst: Air.Inst.Ref, + /// The bitcast instruction used as a placeholder when the + /// new result pointer type is not yet known. + placeholder: Air.Inst.Index, + }) = .{}, +}; + pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; sema.air_instructions.deinit(gpa); @@ -909,10 +929,10 @@ fn analyzeBodyInner( const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, .{ .ip_index = .inferred_alloc_const_type }), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, .{ .ip_index = .inferred_alloc_mut_type }), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, .{ .ip_index = .inferred_alloc_const_type }), - .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, .{ .ip_index = .inferred_alloc_mut_type }), + .alloc_inferred => try sema.zirAllocInferred(block, inst, true), + .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, false), + .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, true), + .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, false), .alloc_mut => try sema.zirAllocMut(block, inst), .alloc_comptime_mut => try sema.zirAllocComptime(block, inst), .make_ptr_const => try sema.zirMakePtrConst(block, inst), @@ -1707,7 +1727,7 @@ fn analyzeBodyInner( break :blk Air.Inst.Ref.void_value; }, }; - if (sema.typeOf(air_inst).isNoReturn(mod)) + if (sema.isNoReturn(air_inst)) break always_noreturn; map.putAssumeCapacity(inst, air_inst); i += 1; @@ -1751,8 +1771,6 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { // The last section of indexes refers to the map of ZIR => AIR. 
const inst = sema.inst_map.get(i - InternPool.static_len).?; if (inst == .generic_poison) return error.GenericPoison; - const ty = sema.typeOf(inst); - assert(!ty.isGenericPoison()); return inst; } @@ -2431,20 +2449,20 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const pointee_ty = try sema.resolveType(block, src, extra.lhs); const ptr = try sema.resolveInst(extra.rhs); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); if (Air.refToIndex(ptr)) |ptr_inst| { switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc => { - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; + const ia1 = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc; + const ia2 = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. // This instruction will not make it to codegen; it is only to participate @@ -2453,14 +2471,14 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE defer trash_block.instructions.deinit(sema.gpa); const operand = try trash_block.addBitCast(pointee_ty, .void_value); - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = inferred_alloc.alignment, - .@"addrspace" = addr_space, + const ptr_ty = try mod.ptrType(.{ + .elem_type = pointee_ty.toIntern(), + .alignment = ia1.alignment, + .address_space = addr_space, }); const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); - try inferred_alloc.prongs.append(sema.arena, .{ + try ia2.prongs.append(sema.arena, .{ .stored_inst = operand, .placeholder = Air.refToIndex(bitcasted_ptr).?, }); @@ -2468,31 +2486,30 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE return bitcasted_ptr; }, .inferred_alloc_comptime => { - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + const alignment = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.alignment; // There will be only one coerce_result_ptr because we are running at comptime. // The alloc will turn into a Decl. 
var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - iac.data.decl_index = try anon_decl.finish( + const decl_index = try anon_decl.finish( pointee_ty, Value.undef, - iac.data.alignment, + alignment.toByteUnits(0), ); - if (iac.data.alignment != 0) { + sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.decl_index = decl_index; + if (alignment != .none) { try sema.resolveTypeLayout(pointee_ty); } - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = iac.data.alignment, - .@"addrspace" = addr_space, + const ptr_ty = try mod.ptrType(.{ + .elem_type = pointee_ty.toIntern(), + .alignment = alignment, + .address_space = addr_space, }); - try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); - return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ + try sema.maybeQueueFuncBodyAnalysis(decl_index); + return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ .ty = ptr_ty.toIntern(), .addr = .{ .mut_decl = .{ - .decl = iac.data.decl_index, + .decl = decl_index, .runtime_index = block.runtime_index, } }, } })).toValue()); @@ -3479,25 +3496,16 @@ fn zirAllocExtended( break :blk alignment; } else 0; - const inferred_alloc_ty = if (small.is_const) - Type{ .ip_index = .inferred_alloc_const_type } - else - Type{ .ip_index = .inferred_alloc_mut_type }; - if (block.is_comptime or small.is_comptime) { if (small.has_type) { return sema.analyzeComptimeAlloc(block, var_ty, alignment); } else { - const ty_inst = try sema.addType(inferred_alloc_ty); - try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = alignment, - })); try sema.air_instructions.append(gpa, .{ .tag = .inferred_alloc_comptime, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), + .data = .{ .inferred_alloc_comptime = .{ + .decl_index = undefined, + .alignment = InternPool.Alignment.fromByteUnits(alignment), + .is_const = small.is_const, } }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); @@ -3518,18 +3526,14 @@ fn zirAllocExtended( return block.addTy(.alloc, ptr_type); } - const ty_inst = try sema.addType(inferred_alloc_ty); - try sema.air_values.append(gpa, try Value.Tag.inferred_alloc.create(sema.arena, .{ - .alignment = alignment, - })); const result_index = try block.addInstAsIndex(.{ .tag = .inferred_alloc, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), + .data = .{ .inferred_alloc = .{ + .alignment = InternPool.Alignment.fromByteUnits(alignment), + .is_const = small.is_const, } }, }); - try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, {}); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{}); return Air.indexToRef(result_index); } @@ -3623,23 +3627,19 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai fn zirAllocInferredComptime( sema: *Sema, inst: Zir.Inst.Index, - inferred_alloc_ty: Type, + is_const: bool, ) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; - const ty_inst = try sema.addType(inferred_alloc_ty); - try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = 0, - })); try sema.air_instructions.append(gpa, .{ .tag = .inferred_alloc_comptime, - .data = .{ 
.ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), + .data = .{ .inferred_alloc_comptime = .{ + .decl_index = undefined, + .alignment = .none, + .is_const = is_const, } }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); @@ -3688,7 +3688,7 @@ fn zirAllocInferred( sema: *Sema, block: *Block, inst: Zir.Inst.Index, - inferred_alloc_ty: Type, + is_const: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3698,33 +3698,26 @@ fn zirAllocInferred( const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; - const ty_inst = try sema.addType(inferred_alloc_ty); if (block.is_comptime) { - try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = 0, - })); try sema.air_instructions.append(gpa, .{ .tag = .inferred_alloc_comptime, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), + .data = .{ .inferred_alloc_comptime = .{ + .decl_index = undefined, + .alignment = .none, + .is_const = is_const, } }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } - try sema.air_values.append(gpa, try Value.Tag.inferred_alloc.create(sema.arena, .{ - .alignment = 0, - })); const result_index = try block.addInstAsIndex(.{ .tag = .inferred_alloc, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), + .data = .{ .inferred_alloc = .{ + .alignment = .none, + .is_const = is_const, } }, }); - try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, {}); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{}); return Air.indexToRef(result_index); } @@ -3732,44 +3725,36 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_inst = Air.refToIndex(ptr).?; - const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload; - const ptr_val = sema.air_values.items[value_index]; - const var_is_mut = switch (sema.typeOf(ptr).toIntern()) { - .inferred_alloc_const_type => false, - .inferred_alloc_mut_type => true, - else => unreachable, - }; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; - const decl_index = iac.data.decl_index; - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); + const iac = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime; + const decl_index = iac.decl_index; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const final_elem_ty = decl.ty; - const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = iac.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const final_ptr_ty = try mod.ptrType(.{ + .elem_type = final_elem_ty.toIntern(), + .is_const = false, + .alignment = iac.alignment, + .address_space = target_util.defaultAddressSpace(target, 
.local), }); - const final_ptr_ty_inst = try sema.addType(final_ptr_ty); - sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; try sema.maybeQueueFuncBodyAnalysis(decl_index); // Change it to an interned. sema.air_instructions.set(ptr_inst, .{ .tag = .interned, - .data = .{ .interned = try sema.mod.intern(.{ .ptr = .{ + .data = .{ .interned = try mod.intern(.{ .ptr = .{ .ty = final_ptr_ty.toIntern(), - .addr = if (var_is_mut) .{ .mut_decl = .{ + .addr = if (!iac.is_const) .{ .mut_decl = .{ .decl = decl_index, .runtime_index = block.runtime_index, } } else .{ .decl = decl_index }, @@ -3777,19 +3762,18 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com }); }, .inferred_alloc => { - assert(sema.unresolved_inferred_allocs.remove(ptr_inst)); - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - const peer_inst_list = inferred_alloc.data.prongs.items(.stored_inst); + const ia1 = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc; + const ia2 = sema.unresolved_inferred_allocs.fetchRemove(ptr_inst).?.value; + const peer_inst_list = ia2.prongs.items(.stored_inst); const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); - const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = inferred_alloc.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const final_ptr_ty = try mod.ptrType(.{ + .elem_type = final_elem_ty.toIntern(), + .alignment = ia1.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), }); - if (var_is_mut) { + if (!ia1.is_const) { try sema.validateVarType(block, ty_src, final_elem_ty, false); } else ct: { // Detect if the value is comptime-known. In such case, the @@ -3858,23 +3842,23 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const new_decl_index = try anon_decl.finish( final_elem_ty, try store_val.copy(anon_decl.arena()), - inferred_alloc.data.alignment, + ia1.alignment.toByteUnits(0), ); break :d new_decl_index; }; - try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); // Even though we reuse the constant instruction, we still remove it from the // block so that codegen does not see it. block.instructions.shrinkRetainingCapacity(search_index); try sema.maybeQueueFuncBodyAnalysis(new_decl_index); - sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{ - .ty = final_elem_ty.toIntern(), - .addr = .{ .decl = new_decl_index }, - } })).toValue(); - // if bitcast ty ref needs to be made const, make_ptr_const - // ZIR handles it later, so we can just use the ty ref here. - air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty; + sema.air_instructions.set(ptr_inst, .{ + .tag = .interned, + .data = .{ .interned = try mod.intern(.{ .ptr = .{ + .ty = final_elem_ty.toIntern(), + .addr = .{ .decl = new_decl_index }, + } }) }, + }); // Unless the block is comptime, `alloc_inferred` always produces // a runtime constant. The final inferred type needs to be @@ -3895,18 +3879,17 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Now we need to go back over all the coerce_result_ptr instructions, which // previously inserted a bitcast as a placeholder, and do the logic as if // the new result ptr type was available. 
- const placeholders = inferred_alloc.data.prongs.items(.placeholder); + const placeholders = ia2.prongs.items(.placeholder); const gpa = sema.gpa; var trash_block = block.makeSubBlock(); trash_block.is_comptime = false; defer trash_block.instructions.deinit(gpa); - const mut_final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = inferred_alloc.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const mut_final_ptr_ty = try mod.ptrType(.{ + .elem_type = final_elem_ty.toIntern(), + .alignment = ia1.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), }); const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty); const empty_trash_count = trash_block.instructions.items.len; @@ -3914,7 +3897,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com for (peer_inst_list, placeholders) |peer_inst, placeholder_inst| { const sub_ptr_ty = sema.typeOf(Air.indexToRef(placeholder_inst)); - if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) { + if (mut_final_ptr_ty.eql(sub_ptr_ty, mod)) { // New result location type is the same as the old one; nothing // to do here. continue; @@ -5009,17 +4992,14 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const src: LazySrcLoc = sema.src; blk: { const ptr_inst = Air.refToIndex(ptr) orelse break :blk; - const air_data = sema.air_instructions.items(.data)[ptr_inst]; switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const ptr_val = sema.air_values.items[air_data.ty_pl.payload]; - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + const iac = &sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { - const ptr_val = sema.air_values.items[air_data.ty_pl.payload]; - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc); + const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; + return sema.storeToInferredAlloc(block, ptr, operand, ia); }, else => break :blk, } @@ -5038,16 +5018,15 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const operand = try sema.resolveInst(bin_inst.rhs); const ptr_inst = Air.refToIndex(ptr).?; const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + const iac = &air_datas[ptr_inst].inferred_alloc_comptime; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc); + const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; + return sema.storeToInferredAlloc(block, ptr, operand, ia); }, else => unreachable, } @@ -5058,14 +5037,14 @@ fn storeToInferredAlloc( block: *Block, ptr: Air.Inst.Ref, operand: Air.Inst.Ref, - inferred_alloc: *Value.Payload.InferredAlloc, + inferred_alloc: *InferredAlloc, ) CompileError!void { // Create a store instruction as a placeholder. This will be replaced by a // proper store sequence once we know the stored type. 
const dummy_store = try block.addBinOp(.store, ptr, operand); // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. - try inferred_alloc.data.prongs.append(sema.arena, .{ + try inferred_alloc.prongs.append(sema.arena, .{ .stored_inst = operand, .placeholder = Air.refToIndex(dummy_store).?, }); @@ -5076,7 +5055,7 @@ fn storeToInferredAllocComptime( block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, - iac: *Value.Payload.InferredAllocComptime, + iac: *Air.Inst.Data.InferredAllocComptime, ) CompileError!void { const operand_ty = sema.typeOf(operand); // There will be only one store_to_inferred_ptr because we are running at comptime. @@ -5085,10 +5064,10 @@ fn storeToInferredAllocComptime( if (operand_val.getVariable(sema.mod) != null) break :store; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - iac.data.decl_index = try anon_decl.finish( + iac.decl_index = try anon_decl.finish( operand_ty, try operand_val.copy(anon_decl.arena()), - iac.data.alignment, + iac.alignment.toByteUnits(0), ); return; } @@ -27643,17 +27622,6 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const prev_ptr = air_datas[ptr_inst].ty_op.operand; const prev_ptr_ty = sema.typeOf(prev_ptr); if (prev_ptr_ty.zigTypeTag(mod) != .Pointer) return null; - - // TODO: I noticed that the behavior tests do not pass if these two - // checks are missing. I don't understand why the presence of inferred - // allocations is relevant to this function, or why it would have - // different behavior depending on whether the types were inferred. - // Something seems wrong here. - switch (prev_ptr_ty.toIntern()) { - .inferred_alloc_mut_type, .inferred_alloc_const_type => return null, - else => {}, - } - const prev_ptr_child_ty = prev_ptr_ty.childType(mod); if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr; ptr_inst = Air.refToIndex(prev_ptr) orelse return null; @@ -31749,9 +31717,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; @@ -32009,8 +31974,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .bool_false => unreachable, .empty_struct => unreachable, .generic_poison => unreachable, - .inferred_alloc_const_type => unreachable, - .inferred_alloc_mut_type => unreachable, .type_info_type => return sema.getBuiltinType("Type"), .extern_options_type => return sema.getBuiltinType("ExternOptions"), @@ -33201,8 +33164,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .undefined => Value.undef, .generic_poison => return error.GenericPoison, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const resolved_ty = try sema.resolveTypeFields(ty); @@ -33737,9 +33698,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; @@ -34501,3 +34459,13 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { return mod.errorSetFromUnsortedNames(names.keys()); } + +/// Avoids crashing the compiler when asking if inferred 
allocations are noreturn. +fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { + if (ref == .noreturn_type) return true; + if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { + .inferred_alloc, .inferred_alloc_comptime => return false, + else => {}, + }; + return sema.typeOf(ref).isNoReturn(sema.mod); +} diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 020686f86ee5..dd0ae66a6390 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -178,9 +178,6 @@ pub fn print( ty = ty.optionalChild(mod); return print(.{ .ty = ty, .val = val }, writer, level, mod); }, - // TODO these should not appear in this function - .inferred_alloc => return writer.writeAll("(inferred allocation value)"), - .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), }, else => { const key = mod.intern_pool.indexToKey(val.ip_index); diff --git a/src/Zir.zig b/src/Zir.zig index 3afff5ba6af2..c3a5f8e09bf2 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2112,8 +2112,6 @@ pub const Inst = struct { slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), - inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), - inferred_alloc_mut_type = @enumToInt(InternPool.Index.inferred_alloc_mut_type), empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), undef = @enumToInt(InternPool.Index.undef), zero = @enumToInt(InternPool.Index.zero), diff --git a/src/type.zig b/src/type.zig index 087dc88c30fb..21e7ced3f496 100644 --- a/src/type.zig +++ b/src/type.zig @@ -88,8 +88,6 @@ pub const Type = struct { .type_info => .Union, .generic_poison => return error.GenericPoison, - - .inferred_alloc_const, .inferred_alloc_mut => return .Pointer, }, // values, not types @@ -620,8 +618,6 @@ pub const Type = struct { => false, .generic_poison => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { @@ -777,9 +773,6 @@ pub const Type = struct { .type_info, .generic_poison, => false, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { @@ -1028,8 +1021,6 @@ pub const Type = struct { .noreturn => unreachable, .generic_poison => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse @@ -1488,8 +1479,6 @@ pub const Type = struct { .type_info => unreachable, .noreturn => unreachable, .generic_poison => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| switch (ty.containerLayout(mod)) { .Packed => { @@ -1732,8 +1721,6 @@ pub const Type = struct { .undefined => unreachable, .enum_literal => unreachable, .generic_poison => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, .atomic_order => unreachable, // missing call to resolveTypeFields .atomic_rmw_op => unreachable, // missing call to resolveTypeFields @@ -1833,12 +1820,9 @@ pub const Type = struct { } pub fn isSinglePointer(ty: Type, mod: *const Module) bool { - return switch 
(ty.ip_index) { - .inferred_alloc_const_type, .inferred_alloc_mut_type => true, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_info| ptr_info.size == .One, - else => false, - }, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_info| ptr_info.size == .One, + else => false, }; } @@ -1849,12 +1833,9 @@ pub const Type = struct { /// Returns `null` if `ty` is not a pointer. pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { - return switch (ty.ip_index) { - .inferred_alloc_const_type, .inferred_alloc_mut_type => .One, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_info| ptr_info.size, - else => null, - }, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_info| ptr_info.size, + else => null, }; } @@ -2612,8 +2593,6 @@ pub const Type = struct { .undefined => return Value.undef, .generic_poison => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { if (mod.structPtrUnwrap(struct_type.index)) |s| { @@ -2799,9 +2778,6 @@ pub const Type = struct { .enum_literal, .type_info, => true, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, }, .struct_type => |struct_type| { // A struct with no fields is not comptime-only. diff --git a/src/value.zig b/src/value.zig index 9244e33ad55c..498b7d33396a 100644 --- a/src/value.zig +++ b/src/value.zig @@ -63,12 +63,6 @@ pub const Value = struct { aggregate, /// An instance of a union. @"union", - /// This is a special value that tracks a set of types that have been stored - /// to an inferred allocation. It does not support any of the normal value queries. - inferred_alloc, - /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc - /// instructions for comptime code. - inferred_alloc_comptime, pub const no_payload_count = 0; @@ -82,8 +76,6 @@ pub const Value = struct { .bytes => Payload.Bytes, .aggregate => Payload.Aggregate, .@"union" => Payload.Union, - .inferred_alloc => Payload.InferredAlloc, - .inferred_alloc_comptime => Payload.InferredAllocComptime, }; } @@ -250,8 +242,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, } } @@ -308,8 +298,6 @@ pub const Value = struct { val = val.castTag(.repeated).?.data; }, .slice => return out_stream.writeAll("(slice)"), - .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), - .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), }; } @@ -4147,41 +4135,6 @@ pub const Value = struct { val: Value, }; }; - - pub const InferredAlloc = struct { - pub const base_tag = Tag.inferred_alloc; - - base: Payload = .{ .tag = base_tag }, - data: struct { - /// The value stored in the inferred allocation. This will go into - /// peer type resolution. This is stored in a separate list so that - /// the items are contiguous in memory and thus can be passed to - /// `Module.resolvePeerTypes`. - prongs: std.MultiArrayList(struct { - /// The dummy instruction used as a peer to resolve the type. - /// Although this has a redundant type with placeholder, this is - /// needed in addition because it may be a constant value, which - /// affects peer type resolution. 
-                stored_inst: Air.Inst.Ref,
-                /// The bitcast instruction used as a placeholder when the
-                /// new result pointer type is not yet known.
-                placeholder: Air.Inst.Index,
-            }) = .{},
-            /// 0 means ABI-aligned.
-            alignment: u32,
-        },
-    };
-
-    pub const InferredAllocComptime = struct {
-        pub const base_tag = Tag.inferred_alloc_comptime;
-
-        base: Payload = .{ .tag = base_tag },
-        data: struct {
-            decl_index: Module.Decl.Index,
-            /// 0 means ABI-aligned.
-            alignment: u32,
-        },
-    };
     };
 
     pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;

From 9a738c0be54c9bda0e57de9da84f86fc73bd5198 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 26 May 2023 00:24:29 -0400
Subject: [PATCH 101/205] Module: intern the values of decls when they are
 marked alive

I'm not sure if this is the right place for this to happen, and it
should become obsolete when comptime mutation is rewritten and the
remaining legacy value tags are removed, so keeping this as a separate
revertible commit.
---
 src/Module.zig            | 36 +++++++++++++++++++-----------------
 src/Sema.zig              |  2 +-
 src/arch/wasm/CodeGen.zig |  4 ++--
 src/codegen.zig           |  4 ++--
 src/codegen/c.zig         |  2 +-
 src/codegen/llvm.zig      |  6 +++---
 src/codegen/spirv.zig     |  2 +-
 7 files changed, 29 insertions(+), 27 deletions(-)

diff --git a/src/Module.zig b/src/Module.zig
index d3045631c5f7..76e2142ae6de 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -6603,47 +6603,49 @@ fn reportRetryableFileError(
     gop.value_ptr.* = err_msg;
 }
 
-pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void {
+pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void {
     switch (mod.intern_pool.indexToKey(val.toIntern())) {
-        .variable => |variable| mod.markDeclIndexAlive(variable.decl),
-        .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl),
-        .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
+        .variable => |variable| try mod.markDeclIndexAlive(variable.decl),
+        .extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl),
+        .func => |func| try mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
         .error_union => |error_union| switch (error_union.val) {
             .err_name => {},
-            .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()),
+            .payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()),
         },
         .ptr => |ptr| {
             switch (ptr.addr) {
-                .decl => |decl| mod.markDeclIndexAlive(decl),
-                .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl),
+                .decl => |decl| try mod.markDeclIndexAlive(decl),
+                .mut_decl => |mut_decl| try mod.markDeclIndexAlive(mut_decl.decl),
                 .int, .comptime_field => {},
-                .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()),
-                .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()),
+                .eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(parent.toValue()),
+                .elem, .field => |base_index| try mod.markReferencedDeclsAlive(base_index.base.toValue()),
             }
-            if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue());
+            if (ptr.len != .none) try mod.markReferencedDeclsAlive(ptr.len.toValue());
         },
-        .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()),
+        .opt => |opt| if (opt.val != .none) try mod.markReferencedDeclsAlive(opt.val.toValue()),
         .aggregate => |aggregate| for (aggregate.storage.values()) |elem|
-            mod.markReferencedDeclsAlive(elem.toValue()),
+            try mod.markReferencedDeclsAlive(elem.toValue()),
         .un => |un| {
-
mod.markReferencedDeclsAlive(un.tag.toValue()); - mod.markReferencedDeclsAlive(un.val.toValue()); + try mod.markReferencedDeclsAlive(un.tag.toValue()); + try mod.markReferencedDeclsAlive(un.val.toValue()); }, else => {}, } } -pub fn markDeclAlive(mod: *Module, decl: *Decl) void { +pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void { if (decl.alive) return; decl.alive = true; + decl.val = (try decl.val.intern(decl.ty, mod)).toValue(); + // This is the first time we are marking this Decl alive. We must // therefore recurse into its value and mark any Decl it references // as also alive, so that any Decl referenced does not get garbage collected. - mod.markReferencedDeclsAlive(decl.val); + try mod.markReferencedDeclsAlive(decl.val); } -fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void { +fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { return mod.markDeclAlive(mod.declPtr(decl_index)); } diff --git a/src/Sema.zig b/src/Sema.zig index 5395bb63d11a..e9c495891810 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5807,7 +5807,7 @@ pub fn analyzeExport( } // This decl is alive no matter what, since it's being exported - mod.markDeclAlive(exported_decl); + try mod.markDeclAlive(exported_decl); try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); const gpa = sema.gpa; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b4e627e957a2..d9cb56404a08 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3019,7 +3019,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { const mod = func.bin_file.base.options.module.?; const decl = mod.declPtr(decl_index); - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const ptr_ty = try mod.singleMutPtrType(decl.ty); return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset); } @@ -3035,7 +3035,7 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind return WValue{ .imm32 = 0xaaaaaaaa }; } - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); const atom = func.bin_file.getAtom(atom_index); diff --git a/src/codegen.zig b/src/codegen.zig index f343f0441d42..87aea6c245b2 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -673,7 +673,7 @@ fn lowerDeclRef( return Result.ok; } - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const vaddr = try bin_file.getDeclVAddr(decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, @@ -782,7 +782,7 @@ fn genDeclRef( } } - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f97292e5102e..2dcc332713d0 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1923,7 +1923,7 @@ pub const DeclGen = struct { fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void { const mod = dg.module; const decl = mod.declPtr(decl_index); - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); if (mod.decl_exports.get(decl_index)) |exports| { try writer.writeAll(exports.items[export_index].options.name); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 46b126ad84db..936b1d847ada 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ 
-3252,7 +3252,7 @@ pub const DeclGen = struct { else => unreachable, }; const fn_decl = dg.module.declPtr(fn_decl_index); - dg.module.markDeclAlive(fn_decl); + try dg.module.markDeclAlive(fn_decl); return dg.resolveLlvmFunction(fn_decl_index); }, .int => |int| { @@ -3831,7 +3831,7 @@ pub const DeclGen = struct { ) Error!*llvm.Value { const mod = dg.module; const decl = mod.declPtr(decl_index); - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const ptr_ty = try mod.singleMutPtrType(decl.ty); return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); } @@ -4006,7 +4006,7 @@ pub const DeclGen = struct { return self.lowerPtrToVoid(tv.ty); } - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const llvm_decl_val = if (is_fn_body) try self.resolveLlvmFunction(decl_index) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 1a19bbdf91f7..43b67414934f 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -256,7 +256,7 @@ pub const DeclGen = struct { /// Note: Function does not actually generate the decl. fn resolveDecl(self: *DeclGen, decl_index: Module.Decl.Index) !SpvModule.Decl.Index { const decl = self.module.declPtr(decl_index); - self.module.markDeclAlive(decl); + try self.module.markDeclAlive(decl); const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { From 9afa97418350a51d8e27f1df903d8034507254ce Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 25 May 2023 23:48:39 -0400 Subject: [PATCH 102/205] InternPool: fix enough crashes to run `build-obj` on a simple program --- src/InternPool.zig | 57 ++++++++------ src/Liveness/Verify.zig | 68 +++++++++-------- src/Sema.zig | 50 ++++++------ src/codegen/llvm.zig | 25 +++--- src/print_air.zig | 2 +- src/value.zig | 140 +++++++++++++++++++++++++++------- tools/lldb_pretty_printers.py | 1 + 7 files changed, 228 insertions(+), 115 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index f1bc4e3accdf..88f3af6f7f67 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2298,7 +2298,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.* = undefined; } -pub fn indexToKey(ip: InternPool, index: Index) Key { +pub fn indexToKey(ip: *const InternPool, index: Index) Key { assert(index != .none); const item = ip.items.get(@enumToInt(index)); const data = item.data; @@ -2361,7 +2361,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_slice => { const ptr_type_index = @intToEnum(Index, data); - var result = indexToKey(ip, ptr_type_index).ptr_type; + var result = ip.indexToKey(ptr_type_index).ptr_type; result.size = .Slice; return .{ .ptr_type = result }; }, @@ -2454,9 +2454,9 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .values_map = .none, } }; }, - .type_enum_explicit => indexToKeyEnum(ip, data, .explicit), - .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive), - .type_function => .{ .func_type = indexToKeyFuncType(ip, data) }, + .type_enum_explicit => ip.indexToKeyEnum(data, .explicit), + .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive), + .type_function => .{ .func_type = ip.indexToKeyFuncType(data) }, .undef => .{ .undef = @intToEnum(Index, data) }, .runtime_value => { @@ -2591,8 +2591,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .ty = .comptime_int_type, .storage = .{ .i64 = @bitCast(i32, data) }, } }, - .int_positive => indexToKeyBigInt(ip, data, true), - .int_negative => indexToKeyBigInt(ip, data, false), + .int_positive => ip.indexToKeyBigInt(data, true), + 
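A detail from the `indexToKey` hunk above deserves a note: a slice type is not stored as a full record of its own. The `type_slice` tag holds only the index of the matching many-item pointer type, and the key is rebuilt at lookup time by overriding the size field. A reduced sketch of that encoding trick, with a hypothetical key type in place of the pool's real one:

    const PtrSize = enum { one, many, slice };
    const PtrKey = struct { child: u32, size: PtrSize };

    // A slice entry stores just the index of its many-pointer twin;
    // lookup copies that key and patches the single differing field.
    fn sliceKey(keys: []const PtrKey, many_ptr_index: u32) PtrKey {
        var key = keys[many_ptr_index];
        key.size = .slice;
        return key;
    }

This keeps every slice type one `u32` wide in the item array, at the cost of one extra lookup.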
.int_negative => ip.indexToKeyBigInt(data, false), .int_small => { const info = ip.extraData(IntSmall, data); return .{ .int = .{ @@ -3430,22 +3430,25 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = try ip.addExtra(gpa, err), }), - .error_union => |error_union| ip.items.appendAssumeCapacity(switch (error_union.val) { - .err_name => |err_name| .{ - .tag = .error_union_error, - .data = try ip.addExtra(gpa, Key.Error{ - .ty = error_union.ty, - .name = err_name, - }), - }, - .payload => |payload| .{ - .tag = .error_union_payload, - .data = try ip.addExtra(gpa, TypeValue{ - .ty = error_union.ty, - .val = payload, - }), - }, - }), + .error_union => |error_union| { + assert(ip.indexToKey(error_union.ty) == .error_union_type); + ip.items.appendAssumeCapacity(switch (error_union.val) { + .err_name => |err_name| .{ + .tag = .error_union_error, + .data = try ip.addExtra(gpa, Key.Error{ + .ty = error_union.ty, + .name = err_name, + }), + }, + .payload => |payload| .{ + .tag = .error_union_payload, + .data = try ip.addExtra(gpa, TypeValue{ + .ty = error_union.ty, + .val = payload, + }), + }, + }); + }, .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{ .tag = .enum_literal, @@ -4191,6 +4194,7 @@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// * ptr <=> ptr /// * null_value => opt /// * payload => opt +/// * error set <=> error set pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; @@ -4230,6 +4234,13 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al } }), else => {}, }, + .err => |err| switch (ip.indexToKey(new_ty)) { + .error_set_type, .inferred_error_set_type => return ip.get(gpa, .{ .err = .{ + .ty = new_ty, + .name = err.name, + } }), + else => {}, + }, else => {}, } switch (ip.indexToKey(new_ty)) { diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index cbb7f9f1435b..e8b024eb6f0d 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -61,10 +61,10 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .work_item_id, .work_group_size, .work_group_id, - => try self.verifyInst(inst, .{ .none, .none, .none }), + => try self.verifyInstOperands(inst, .{ .none, .none, .none }), .trap, .unreach => { - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInstOperands(inst, .{ .none, .none, .none }); // This instruction terminates the function, so everything should be dead if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst}); }, @@ -113,7 +113,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .c_va_copy, => { const ty_op = data[inst].ty_op; - try self.verifyInst(inst, .{ ty_op.operand, .none, .none }); + try self.verifyInstOperands(inst, .{ ty_op.operand, .none, .none }); }, .is_null, .is_non_null, @@ -149,13 +149,13 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .c_va_end, => { const un_op = data[inst].un_op; - try self.verifyInst(inst, .{ un_op, .none, .none }); + try self.verifyInstOperands(inst, .{ un_op, .none, .none }); }, .ret, .ret_load, => { const un_op = data[inst].un_op; - try self.verifyInst(inst, .{ un_op, .none, .none }); + try self.verifyInstOperands(inst, .{ un_op, .none, .none }); // This instruction terminates the function, so everything should be dead if (self.live.count() > 0) return invalid("%{}: instructions still alive", 
.{inst}); }, @@ -164,36 +164,36 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .wasm_memory_grow, => { const pl_op = data[inst].pl_op; - try self.verifyInst(inst, .{ pl_op.operand, .none, .none }); + try self.verifyInstOperands(inst, .{ pl_op.operand, .none, .none }); }, .prefetch => { const prefetch = data[inst].prefetch; - try self.verifyInst(inst, .{ prefetch.ptr, .none, .none }); + try self.verifyInstOperands(inst, .{ prefetch.ptr, .none, .none }); }, .reduce, .reduce_optimized, => { const reduce = data[inst].reduce; - try self.verifyInst(inst, .{ reduce.operand, .none, .none }); + try self.verifyInstOperands(inst, .{ reduce.operand, .none, .none }); }, .union_init => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.init, .none, .none }); + try self.verifyInstOperands(inst, .{ extra.init, .none, .none }); }, .struct_field_ptr, .struct_field_val => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.struct_operand, .none, .none }); + try self.verifyInstOperands(inst, .{ extra.struct_operand, .none, .none }); }, .field_parent_ptr => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.field_ptr, .none, .none }); + try self.verifyInstOperands(inst, .{ extra.field_ptr, .none, .none }); }, .atomic_load => { const atomic_load = data[inst].atomic_load; - try self.verifyInst(inst, .{ atomic_load.ptr, .none, .none }); + try self.verifyInstOperands(inst, .{ atomic_load.ptr, .none, .none }); }, // binary @@ -263,7 +263,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .memcpy, => { const bin_op = data[inst].bin_op; - try self.verifyInst(inst, .{ bin_op.lhs, bin_op.rhs, .none }); + try self.verifyInstOperands(inst, .{ bin_op.lhs, bin_op.rhs, .none }); }, .add_with_overflow, .sub_with_overflow, @@ -277,48 +277,48 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none }); + try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none }); }, .shuffle => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.a, extra.b, .none }); + try self.verifyInstOperands(inst, .{ extra.a, extra.b, .none }); }, .cmp_vector, .cmp_vector_optimized, => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none }); + try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none }); }, .atomic_rmw => { const pl_op = data[inst].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; - try self.verifyInst(inst, .{ pl_op.operand, extra.operand, .none }); + try self.verifyInstOperands(inst, .{ pl_op.operand, extra.operand, .none }); }, // ternary .select => { const pl_op = data[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - try self.verifyInst(inst, .{ pl_op.operand, extra.lhs, extra.rhs }); + try self.verifyInstOperands(inst, .{ pl_op.operand, extra.lhs, extra.rhs }); }, .mul_add => { const pl_op = data[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - try 
self.verifyInst(inst, .{ extra.lhs, extra.rhs, pl_op.operand }); + try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, pl_op.operand }); }, .vector_store_elem => { const vector_store_elem = data[inst].vector_store_elem; const extra = self.air.extraData(Air.Bin, vector_store_elem.payload).data; - try self.verifyInst(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs }); + try self.verifyInstOperands(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs }); }, .cmpxchg_strong, .cmpxchg_weak, => { const ty_pl = data[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - try self.verifyInst(inst, .{ extra.ptr, extra.expected_value, extra.new_value }); + try self.verifyInstOperands(inst, .{ extra.ptr, extra.expected_value, extra.new_value }); }, // big tombs @@ -332,7 +332,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (elements) |element| { try self.verifyOperand(inst, element, bt.feed()); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const pl_op = data[inst].pl_op; @@ -347,7 +347,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (args) |arg| { try self.verifyOperand(inst, arg, bt.feed()); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .assembly => { const ty_pl = data[inst].ty_pl; @@ -373,7 +373,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (inputs) |input| { try self.verifyOperand(inst, input, bt.feed()); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, // control flow @@ -397,7 +397,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death); - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .try_ptr => { const ty_pl = data[inst].ty_pl; @@ -419,7 +419,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death); - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .br => { const br = data[inst].br; @@ -431,7 +431,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { } else { gop.value_ptr.* = try self.live.clone(self.gpa); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, .block => { const ty_pl = data[inst].ty_pl; @@ -462,7 +462,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { try self.verifyMatchingLiveness(inst, live); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInstOperands(inst, .{ .none, .none, .none }); }, .loop => { const ty_pl = data[inst].ty_pl; @@ -477,7 +477,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { // The same stuff should be alive after the loop as before it try self.verifyMatchingLiveness(inst, live); - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInstOperands(inst, .{ .none, .none, .none }); }, .cond_br => { const pl_op = data[inst].pl_op; @@ -500,7 +500,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (cond_br_liveness.else_deaths) |death| try self.verifyDeath(inst, death); try self.verifyBody(else_body); - try self.verifyInst(inst, .{ .none, .none, .none }); + try 
self.verifyInst(inst); }, .switch_br => { const pl_op = data[inst].pl_op; @@ -544,7 +544,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { try self.verifyBody(else_body); } - try self.verifyInst(inst, .{ .none, .none, .none }); + try self.verifyInst(inst); }, } } @@ -570,7 +570,7 @@ fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies } } -fn verifyInst( +fn verifyInstOperands( self: *Verify, inst: Air.Inst.Index, operands: [Liveness.bpi - 1]Air.Inst.Ref, @@ -579,6 +579,10 @@ fn verifyInst( const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index)); try self.verifyOperand(inst, operand, dies); } + try self.verifyInst(inst); +} + +fn verifyInst(self: *Verify, inst: Air.Inst.Index) Error!void { if (self.air.instructions.items(.tag)[inst] == .interned) return; if (self.liveness.isUnused(inst)) { assert(!self.live.contains(inst)); diff --git a/src/Sema.zig b/src/Sema.zig index e9c495891810..b0d36c46991a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -16687,7 +16687,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const new_decl_ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type, - .sentinel = .zero_u8, }); const new_decl = try anon_decl.finish( new_decl_ty, @@ -16698,7 +16697,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_sentinel_0_type, + .ty = .slice_const_u8_type, .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(), } }); @@ -23991,7 +23990,6 @@ fn panicWithMsg( msg_inst: Air.Inst.Ref, ) !void { const mod = sema.mod; - const arena = sema.arena; if (!mod.backendSupportsFeature(.panic_fn)) { _ = try block.addNoOp(.trap); @@ -24001,16 +23999,22 @@ fn panicWithMsg( const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const target = mod.getTarget(); - const ptr_stack_trace_ty = try Type.ptr(arena, mod, .{ - .pointee_type = stack_trace_ty, - .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic + const ptr_stack_trace_ty = try mod.ptrType(.{ + .elem_type = stack_trace_ty.toIntern(), + .address_space = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic }); - const null_stack_trace = try sema.addConstant( - try Type.optional(arena, ptr_stack_trace_ty, mod), - Value.null, - ); - const args: [3]Air.Inst.Ref = .{ msg_inst, null_stack_trace, .null_value }; - try sema.callBuiltin(block, panic_fn, .auto, &args); + const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); + const null_stack_trace = try sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_ptr_stack_trace_ty.toIntern(), + .val = .none, + } })).toValue()); + + const opt_usize_ty = try mod.optionalType(.usize_type); + const null_ret_addr = try sema.addConstant(opt_usize_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_usize_ty.toIntern(), + .val = .none, + } })).toValue()); + try sema.callBuiltin(block, panic_fn, .auto, &.{ msg_inst, null_stack_trace, null_ret_addr }); } fn panicUnwrapError( @@ -29395,13 +29399,10 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { const mod = 
sema.mod; - const val = opt_val orelse return Value.null; - const ptr_val = try sema.refValue(block, ty, val); - const result = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).toIntern())).toIntern(), - .val = ptr_val.toIntern(), - } }); - return result.toValue(); + return (try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType((try mod.singleConstPtrType(Type.anyopaque)).toIntern())).toIntern(), + .val = if (opt_val) |val| (try sema.refValue(block, ty, val)).toIntern() else .none, + } })).toValue(); } fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref { @@ -30603,7 +30604,7 @@ fn wrapErrorUnionPayload( if (try sema.resolveMaybeUndefVal(coerced)) |val| { return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ .ty = dest_ty.toIntern(), - .val = .{ .payload = val.toIntern() }, + .val = .{ .payload = try val.intern(dest_payload_ty, mod) }, } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30647,7 +30648,12 @@ fn wrapErrorUnionSet( else => unreachable, }, } - return sema.addConstant(dest_ty, val); + return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ + .ty = dest_ty.toIntern(), + .val = .{ + .err_name = mod.intern_pool.indexToKey(try val.intern(dest_err_set_ty, mod)).err.name, + }, + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -33325,7 +33331,7 @@ fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { } fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { - return sema.addConstant(ty, Value.undef); + return sema.addConstant(ty, (try sema.mod.intern(.{ .undef = ty.toIntern() })).toValue()); } pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 936b1d847ada..bbedc1160cfd 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3267,21 +3267,28 @@ pub const DeclGen = struct { return llvm_ty.constInt(kv.value, .False); }, .error_union => |error_union| { + const err_tv: TypedValue = switch (error_union.val) { + .err_name => |err_name| .{ + .ty = tv.ty.errorUnionSet(mod), + .val = (try mod.intern(.{ .err = .{ + .ty = tv.ty.errorUnionSet(mod).toIntern(), + .name = err_name, + } })).toValue(), + }, + .payload => .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, + }; const payload_type = tv.ty.errorUnionPayload(mod); - const is_pl = tv.val.errorUnionIsPayload(mod); - if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
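That comment is the crux of the error union lowering: when the payload type has no runtime bits, no aggregate is emitted at all and the error union collapses to a bare error value. A reduced model of the decision, with hypothetical sizes standing in for LLVM's actual layout rules:

    const Repr = union(enum) {
        bare_error: u16, // zero-bit payload: the union is just the error code
        aggregate: struct { err: u16, payload_bytes: u64 },
    };

    fn errorUnionRepr(payload_bytes: u64, err_code: u16) Repr {
        if (payload_bytes == 0) return .{ .bare_error = err_code };
        return .{ .aggregate = .{ .err = err_code, .payload_bytes = payload_bytes } };
    }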
- const err_val = if (!is_pl) tv.val else try mod.intValue(Type.err_int, 0); - return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); + return dg.lowerValue(err_tv); } const payload_align = payload_type.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - const llvm_error_value = try dg.lowerValue(.{ - .ty = Type.anyerror, - .val = if (is_pl) try mod.intValue(Type.err_int, 0) else tv.val, - }); + const error_align = err_tv.ty.abiAlignment(mod); + const llvm_error_value = try dg.lowerValue(err_tv); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, .val = switch (error_union.val) { diff --git a/src/print_air.zig b/src/print_air.zig index 204f5ddeb991..800fbc43c26d 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -703,7 +703,7 @@ const Writer = struct { const pl_op = w.air.instructions.items(.data)[inst].pl_op; try w.writeOperand(s, inst, 0, pl_op.operand); const name = w.air.nullTerminatedString(pl_op.payload); - try s.print(", {s}", .{name}); + try s.print(", \"{}\"", .{std.zig.fmtEscapes(name)}); } fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { diff --git a/src/value.zig b/src/value.zig index 498b7d33396a..bf201aceaf6e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -347,6 +347,43 @@ pub const Value = struct { pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), ty.toIntern()); switch (val.tag()) { + .eu_payload => { + const pl = val.castTag(.eu_payload).?.data; + return mod.intern(.{ .error_union = .{ + .ty = ty.toIntern(), + .val = .{ .payload = try pl.intern(ty.errorUnionPayload(mod), mod) }, + } }); + }, + .opt_payload => { + const pl = val.castTag(.opt_payload).?.data; + return mod.intern(.{ .opt = .{ + .ty = ty.toIntern(), + .val = try pl.intern(ty.optionalChild(mod), mod), + } }); + }, + .slice => { + const pl = val.castTag(.slice).?.data; + const ptr = try pl.ptr.intern(ty.optionalChild(mod), mod); + var ptr_key = mod.intern_pool.indexToKey(ptr).ptr; + assert(ptr_key.len == .none); + ptr_key.ty = ty.toIntern(); + ptr_key.len = try pl.len.intern(Type.usize, mod); + return mod.intern(.{ .ptr = ptr_key }); + }, + .bytes => { + const pl = val.castTag(.bytes).?.data; + return mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = pl }, + } }); + }, + .repeated => { + const pl = val.castTag(.repeated).?.data; + return mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = try pl.intern(ty.childType(mod), mod) }, + } }); + }, .aggregate => { const old_elems = val.castTag(.aggregate).?.data; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); @@ -372,24 +409,74 @@ pub const Value = struct { .val = try pl.val.intern(ty.unionFieldType(pl.tag, mod), mod), } }); }, - else => unreachable, } } pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value { - if (val.ip_index == .none) return val; - switch (mod.intern_pool.indexToKey(val.toIntern())) { + return if (val.ip_index == .none) val else switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, 
+ .int, + .err, + .enum_literal, + .enum_tag, + .float, + => val, + + .error_union => |error_union| switch (error_union.val) { + .err_name => val, + .payload => |payload| Tag.eu_payload.create(arena, payload.toValue()), + }, + + .ptr => |ptr| switch (ptr.len) { + .none => val, + else => |len| Tag.slice.create(arena, .{ + .ptr = val.slicePtr(mod), + .len = len.toValue(), + }), + }, + + .opt => |opt| switch (opt.val) { + .none => val, + else => |payload| Tag.opt_payload.create(arena, payload.toValue()), + }, + .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => |bytes| return Tag.bytes.create(arena, try arena.dupe(u8, bytes)), + .bytes => |bytes| Tag.bytes.create(arena, try arena.dupe(u8, bytes)), .elems => |old_elems| { const new_elems = try arena.alloc(Value, old_elems.len); for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); return Tag.aggregate.create(arena, new_elems); }, - .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()), + .repeated_elem => |elem| Tag.repeated.create(arena, elem.toValue()), }, - else => return val, - } + + .un => |un| Tag.@"union".create(arena, .{ + .tag = un.tag.toValue(), + .val = un.val.toValue(), + }), + }; } pub fn toIntern(val: Value) InternPool.Index { @@ -1896,7 +1983,7 @@ pub const Value = struct { /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { + return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => true, .ptr => |ptr| switch (ptr.addr) { .decl => |decl_index| { @@ -1919,28 +2006,25 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| variable.is_threadlocal, - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl_index| { - const decl = mod.declPtr(decl_index); - assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); - }, - .mut_decl => |mut_decl| { - const decl = mod.declPtr(mut_decl.decl); - assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); - }, - .int => false, - .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), - .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), - .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), + return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| variable.is_threadlocal, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); }, - else => false, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), + .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), }, + else => false, }; } diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 36e497afb9c5..740857083065 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -681,6 +681,7 @@ def __lldb_init_module(debugger, _=None): add(debugger, 
category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True) add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True) add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True) + add(debugger, category='zig.stage2', type='Module.LazySrcLoc', identifier='zig_TaggedUnion', synth=True) add(debugger, category='zig.stage2', type='InternPool.Index', synth=True) add(debugger, category='zig.stage2', type='InternPool.Key', identifier='zig_TaggedUnion', synth=True) add(debugger, category='zig.stage2', type='InternPool.Key.Int.Storage', identifier='zig_TaggedUnion', synth=True) From 66c43968546e38879a2d4c3f2264e10676deef73 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 25 May 2023 23:04:15 -0700 Subject: [PATCH 103/205] AIR: eliminate the `values` array --- src/Air.zig | 16 +++++++++------- src/Module.zig | 1 - src/Sema.zig | 27 +++++++++------------------ src/arch/aarch64/CodeGen.zig | 4 ++-- src/arch/arm/CodeGen.zig | 4 ++-- src/arch/riscv64/CodeGen.zig | 4 ++-- src/arch/sparc64/CodeGen.zig | 4 ++-- src/arch/wasm/CodeGen.zig | 2 +- src/arch/x86_64/CodeGen.zig | 4 ++-- src/codegen/c.zig | 6 +++--- src/codegen/llvm.zig | 10 +++++----- src/codegen/spirv.zig | 2 +- src/print_air.zig | 22 ++++++---------------- 13 files changed, 44 insertions(+), 62 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index e6cfc8c116ff..56f7d4cf01b3 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -17,7 +17,6 @@ instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. /// The first few indexes are reserved. See `ExtraIndex` for the values. extra: []const u32, -values: []const Value, pub const ExtraIndex = enum(u32) { /// Payload index of the main `Block` in the `extra` array. @@ -421,10 +420,10 @@ pub const Inst = struct { /// Marks the end of a semantic scope for debug info variables. dbg_block_end, /// Marks the start of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_begin, /// Marks the end of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_end, /// Marks the beginning of a local variable. The operand is a pointer pointing /// to the storage for the variable. The local may be a const or a var. @@ -967,6 +966,10 @@ pub const Inst = struct { // Index into a different array. 
payload: u32, }, + ty_fn: struct { + ty: Ref, + func: Module.Fn.Index, + }, br: struct { block_inst: Index, operand: Ref, @@ -1090,8 +1093,7 @@ pub const FieldParentPtr = struct { pub const Shuffle = struct { a: Inst.Ref, b: Inst.Ref, - // index to air_values - mask: u32, + mask: InternPool.Index, mask_len: u32, }; @@ -1469,7 +1471,8 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end u32 => air.extra[i], Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]), i32 => @bitCast(i32, air.extra[i]), - else => @compileError("bad field type"), + InternPool.Index => @intToEnum(InternPool.Index, air.extra[i]), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; } @@ -1482,7 +1485,6 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { air.instructions.deinit(gpa); gpa.free(air.extra); - gpa.free(air.values); air.* = undefined; } diff --git a/src/Module.zig b/src/Module.zig index 76e2142ae6de..3dd89f126925 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5720,7 +5720,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE return Air{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = try sema.air_extra.toOwnedSlice(gpa), - .values = try sema.air_values.toOwnedSlice(gpa), }; } diff --git a/src/Sema.zig b/src/Sema.zig index b0d36c46991a..003481084628 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -17,7 +17,6 @@ perm_arena: Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, -air_values: std.ArrayListUnmanaged(Value) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -772,7 +771,6 @@ pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; sema.air_instructions.deinit(gpa); sema.air_extra.deinit(gpa); - sema.air_values.deinit(gpa); sema.inst_map.deinit(gpa); sema.decl_val_table.deinit(gpa); sema.types_to_resolve.deinit(gpa); @@ -2018,10 +2016,8 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( } const air_datas = sema.air_instructions.items(.data); const val = switch (air_tags[i]) { - .inferred_alloc, .inferred_alloc_comptime => val: { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - break :val sema.air_values.items[ty_pl.payload]; - }, + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, .interned => air_datas[i].interned.toValue(), else => return null, }; @@ -7930,20 +7926,17 @@ fn emitDbgInline( new_func_ty: Type, tag: Air.Inst.Tag, ) CompileError!void { - if (sema.mod.comp.bin_file.options.strip) return; + const mod = sema.mod; + if (mod.comp.bin_file.options.strip) return; // Recursive inline call; no dbg_inline needed. 
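Looking back at the `extraData` and `addExtraAssumeCapacity` hunks above: both sides round-trip typed struct fields through the raw `u32` array by switching on each field's type at comptime, so supporting `InternPool.Index` is one more case in each switch. A minimal sketch of the same serialization trick, with a reduced field set and a hypothetical `Index` enum:

    const std = @import("std");

    const Index = enum(u32) { none = 0, _ };

    // Encode each field as its u32 bit pattern, one word per field,
    // mirroring the type switch in addExtraAssumeCapacity.
    fn pack(comptime T: type, extra: T, out: []u32) void {
        comptime var i: usize = 0;
        inline for (@typeInfo(T).Struct.fields) |field| {
            out[i] = switch (field.type) {
                u32 => @field(extra, field.name),
                Index => @enumToInt(@field(extra, field.name)),
                else => @compileError("bad field type: " ++ @typeName(field.type)),
            };
            i += 1;
        }
    }

    test "packs a typed field as its bit pattern" {
        const Extra = struct { lhs: u32, ty: Index };
        var buf: [2]u32 = undefined;
        pack(Extra, .{ .lhs = 7, .ty = .none }, &buf);
        try std.testing.expectEqual(@as(u32, 7), buf[0]);
        try std.testing.expectEqual(@as(u32, 0), buf[1]);
    }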
if (old_func == new_func) return; - try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ - .ty = new_func_ty.toIntern(), - .index = new_func, - } })).toValue()); _ = try block.addInst(.{ .tag = tag, - .data = .{ .ty_pl = .{ + .data = .{ .ty_fn = .{ .ty = try sema.addType(new_func_ty), - .payload = @intCast(u32, sema.air_values.items.len - 1), + .func = new_func, } }, }); } @@ -21724,8 +21717,6 @@ fn analyzeShuffle( } } - const mask_index = @intCast(u32, sema.air_values.items.len); - try sema.air_values.append(sema.gpa, mask); return block.addInst(.{ .tag = .shuffle, .data = .{ .ty_pl = .{ @@ -21733,7 +21724,7 @@ fn analyzeShuffle( .payload = try block.sema.addExtra(Air.Shuffle{ .a = a, .b = b, - .mask = mask_index, + .mask = mask.toIntern(), .mask_len = mask_len, }), } }, @@ -33311,7 +33302,6 @@ pub fn getTmpAir(sema: Sema) Air { return .{ .instructions = sema.air_instructions.slice(), .extra = sema.air_extra.items, - .values = sema.air_values.items, }; } @@ -33371,7 +33361,8 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { u32 => @field(extra, field.name), Air.Inst.Ref => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - else => @compileError("bad field type"), + InternPool.Index => @enumToInt(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } return result; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 54a34e8f09f4..3afb510d4320 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4621,9 +4621,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 8f1a8fdb670d..5f476a2e8028 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4568,9 +4568,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 660630503ddd..5417650dd58c 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1875,9 +1875,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, 
.{ .none, .none, .none }); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index c7376a6eb7de..354af50b61ab 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1660,9 +1660,9 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d9cb56404a08..0c771974179a 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4947,7 +4947,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const a = try func.resolveInst(extra.a); const b = try func.resolveInst(extra.b); - const mask = func.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const child_ty = inst_ty.childType(mod); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 48504dee8fb5..00f5b3f3daae 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8541,9 +8541,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .unreach, .{ .none, .none, .none }); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2dcc332713d0..59d00f5849f0 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -4302,10 +4302,10 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { - const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = f.air.instructions.items(.data)[inst].ty_fn; const mod = f.object.dg.module; const writer = f.object.writer(); - const function = f.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); return .none; } @@ -6612,7 +6612,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; - const mask = f.air.values[extra.mask]; + const mask = extra.mask.toValue(); const lhs = try f.resolveInst(extra.a); const rhs = try f.resolveInst(extra.b); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index bbedc1160cfd..dd07b5edbd09 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -5927,10 +5927,10 @@ pub const FuncGen = struct { fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const dib = self.dg.object.di_builder orelse return null; - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.dg.module; - const func = 
self.air.values[ty_pl.payload].getFunction(mod).?; + const func = mod.funcPtr(ty_fn.func); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); @@ -5986,10 +5986,10 @@ pub const FuncGen = struct { fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { if (self.dg.object.di_builder == null) return null; - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.dg.module; - const func = self.air.values[ty_pl.payload].getFunction(mod).?; + const func = mod.funcPtr(ty_fn.func); const decl = mod.declPtr(func.owner_decl); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; @@ -8875,7 +8875,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolveInst(extra.a); const b = try self.resolveInst(extra.b); - const mask = self.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const a_len = self.typeOf(extra.a).vectorLen(mod); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 43b67414934f..80e98dbcd3c5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2074,7 +2074,7 @@ pub const DeclGen = struct { const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); const b = try self.resolve(extra.b); - const mask = self.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const a_len = self.typeOf(extra.a).vectorLen(mod); diff --git a/src/print_air.zig b/src/print_air.zig index 800fbc43c26d..be7bc9610d8e 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -15,12 +15,11 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo // the debug safety tag but we want to measure release size. 
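The byte accounting computed here is also why this commit touches `print_air.zig`: with `air.values` deleted there is no values term left to report, because constants such as shuffle masks now live in the module-wide intern pool and are referenced by `InternPool.Index`. A toy model of why interning also deduplicates, with string keys standing in for real value keys:

    const std = @import("std");

    const Pool = struct {
        map: std.StringArrayHashMap(void),

        // getOrPut yields a stable slot index, so equal keys intern to
        // the same index and each distinct constant is stored once.
        fn intern(pool: *Pool, key: []const u8) !u32 {
            const gop = try pool.map.getOrPut(key);
            return @intCast(u32, gop.index);
        }
    };

    test "identical constants share one index" {
        var pool = Pool{ .map = std.StringArrayHashMap(void).init(std.testing.allocator) };
        defer pool.map.deinit();
        try std.testing.expectEqual(try pool.intern("mask"), try pool.intern("mask"));
    }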
(@sizeOf(Air.Inst.Tag) + 8); const extra_bytes = air.extra.len * @sizeOf(u32); - const values_bytes = air.values.len * @sizeOf(Value); const tomb_bytes = if (liveness) |l| l.tomb_bits.len * @sizeOf(usize) else 0; const liveness_extra_bytes = if (liveness) |l| l.extra.len * @sizeOf(u32) else 0; const liveness_special_bytes = if (liveness) |l| l.special.count() * 8 else 0; const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes + - values_bytes + @sizeOf(Liveness) + liveness_extra_bytes + + @sizeOf(Liveness) + liveness_extra_bytes + liveness_special_bytes + tomb_bytes; // zig fmt: off @@ -28,7 +27,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo \\# Total AIR+Liveness bytes: {} \\# AIR Instructions: {d} ({}) \\# AIR Extra Data: {d} ({}) - \\# AIR Values Bytes: {d} ({}) \\# Liveness tomb_bits: {} \\# Liveness Extra Data: {d} ({}) \\# Liveness special table: {d} ({}) @@ -37,7 +35,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo fmtIntSizeBin(total_bytes), air.instructions.len, fmtIntSizeBin(instruction_bytes), air.extra.len, fmtIntSizeBin(extra_bytes), - air.values.len, fmtIntSizeBin(values_bytes), fmtIntSizeBin(tomb_bytes), if (liveness) |l| l.extra.len else 0, fmtIntSizeBin(liveness_extra_bytes), if (liveness) |l| l.special.count() else 0, fmtIntSizeBin(liveness_special_bytes), @@ -300,7 +297,8 @@ const Writer = struct { .struct_field_ptr => try w.writeStructField(s, inst), .struct_field_val => try w.writeStructField(s, inst), - .inferred_alloc, .inferred_alloc_comptime => try w.writeConstant(s, inst), + .inferred_alloc => @panic("TODO"), + .inferred_alloc_comptime => @panic("TODO"), .interned => try w.writeInterned(s, inst), .assembly => try w.writeAssembly(s, inst), .dbg_stmt => try w.writeDbgStmt(s, inst), @@ -598,14 +596,6 @@ const Writer = struct { try s.print(", {d}", .{extra.field_index}); } - fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const val = w.air.values[ty_pl.payload]; - const ty = w.air.getRefType(ty_pl.ty); - try w.writeType(s, ty); - try s.print(", {}", .{val.fmtValue(ty, w.module)}); - } - fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const mod = w.module; const ip_index = w.air.instructions.items(.data)[inst].interned; @@ -693,9 +683,9 @@ const Writer = struct { } fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const func_index = w.module.intern_pool.indexToFunc(w.air.values[ty_pl.payload].ip_index); - const owner_decl = w.module.declPtr(w.module.funcPtrUnwrap(func_index).?.owner_decl); + const ty_fn = w.air.instructions.items(.data)[inst].ty_fn; + const func_index = ty_fn.func; + const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl); try s.print("{s}", .{owner_decl.name}); } From f2c716187cf486e519482ef014b34f7271cee3cf Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 26 May 2023 03:41:35 -0400 Subject: [PATCH 104/205] InternPool: fix more crashes --- src/InternPool.zig | 245 ++++++++++++--------- src/Module.zig | 5 + src/Sema.zig | 218 ++++++++++--------- src/codegen.zig | 28 +-- src/codegen/llvm.zig | 2 +- src/type.zig | 392 +++++++++++++++------------------- src/value.zig | 47 ++-- tools/lldb_pretty_printers.py | 13 +- 8 files changed, 502 insertions(+), 448 deletions(-) diff --git a/src/InternPool.zig 
b/src/InternPool.zig index 88f3af6f7f67..ad47b4c84eb5 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -650,8 +650,14 @@ pub const Key = union(enum) { .enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl), .variable => |variable| std.hash.autoHash(hasher, variable.decl), - .extern_func => |extern_func| std.hash.autoHash(hasher, extern_func.decl), - .func => |func| std.hash.autoHash(hasher, func.index), + .extern_func => |extern_func| { + std.hash.autoHash(hasher, extern_func.ty); + std.hash.autoHash(hasher, extern_func.decl); + }, + .func => |func| { + std.hash.autoHash(hasher, func.ty); + std.hash.autoHash(hasher, func.index); + }, .int => |int| { // Canonicalize all integers by converting them to BigIntConst. @@ -854,11 +860,11 @@ pub const Key = union(enum) { }, .extern_func => |a_info| { const b_info = b.extern_func; - return a_info.decl == b_info.decl; + return a_info.ty == b_info.ty and a_info.decl == b_info.decl; }, .func => |a_info| { const b_info = b.func; - return a_info.index == b_info.index; + return a_info.ty == b_info.ty and a_info.index == b_info.index; }, .ptr => |a_info| { @@ -1340,8 +1346,8 @@ pub const Index = enum(u32) { float_c_longdouble_f128: struct { data: *Float128 }, float_comptime_float: struct { data: *Float128 }, variable: struct { data: *Variable }, - extern_func: struct { data: void }, - func: struct { data: void }, + extern_func: struct { data: *Key.ExternFunc }, + func: struct { data: *Key.Func }, only_possible_value: DataIsIndex, union_value: struct { data: *Key.Union }, bytes: struct { data: *Bytes }, @@ -3216,6 +3222,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .opt => |opt| { assert(ip.isOptionalType(opt.ty)); + assert(opt.val == .none or ip.indexToKey(opt.ty).opt_type == ip.typeOf(opt.val)); ip.items.appendAssumeCapacity(if (opt.val == .none) .{ .tag = .opt_null, .data = @enumToInt(opt.ty), @@ -3226,23 +3233,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .int => |int| b: { - switch (int.ty) { - .usize_type, - .isize_type, - .c_char_type, - .c_short_type, - .c_ushort_type, - .c_int_type, - .c_uint_type, - .c_long_type, - .c_ulong_type, - .c_longlong_type, - .c_ulonglong_type, - .c_longdouble_type, - .comptime_int_type, - => {}, - else => assert(ip.indexToKey(int.ty) == .int_type), - } + assert(ip.isIntegerType(int.ty)); switch (int.storage) { .u64, .i64, .big_int => {}, .lazy_align, .lazy_size => |lazy_ty| { @@ -3425,13 +3416,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, - .err => |err| ip.items.appendAssumeCapacity(.{ - .tag = .error_set_error, - .data = try ip.addExtra(gpa, err), - }), + .err => |err| { + assert(ip.isErrorSetType(err.ty)); + ip.items.appendAssumeCapacity(.{ + .tag = .error_set_error, + .data = try ip.addExtra(gpa, err), + }); + }, .error_union => |error_union| { - assert(ip.indexToKey(error_union.ty) == .error_union_type); + assert(ip.isErrorUnionType(error_union.ty)); ip.items.appendAssumeCapacity(switch (error_union.val) { .err_name => |err_name| .{ .tag = .error_union_error, @@ -3456,9 +3450,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), .enum_tag => |enum_tag| { - assert(enum_tag.ty != .none); - assert(enum_tag.int != .none); - + assert(ip.isEnumType(enum_tag.ty)); + assert(ip.indexToKey(enum_tag.int) == .int); ip.items.appendAssumeCapacity(.{ .tag = .enum_tag, .data = try ip.addExtra(gpa, enum_tag), @@ -4191,69 +4184,93 @@ pub fn sliceLen(ip: 
InternPool, i: Index) Index { /// * identity coercion /// * int <=> int /// * int <=> enum +/// * enum_literal => enum /// * ptr <=> ptr /// * null_value => opt /// * payload => opt /// * error set <=> error set +/// * error union <=> error union +/// * error set => error union +/// * payload => error union +/// * fn <=> fn pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; switch (ip.indexToKey(val)) { - .int => |int| switch (ip.indexToKey(new_ty)) { - .simple_type => |simple_type| switch (simple_type) { - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .comptime_int, - => return getCoercedInts(ip, gpa, int, new_ty), - else => {}, - }, - .int_type => return getCoercedInts(ip, gpa, int, new_ty), - .enum_type => return ip.get(gpa, .{ .enum_tag = .{ + .extern_func => |extern_func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .extern_func = .{ + .ty = new_ty, + .decl = extern_func.decl, + .lib_name = extern_func.lib_name, + } }), + .func => |func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .func = .{ + .ty = new_ty, + .index = func.index, + } }), + .int => |int| if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, int, new_ty) + else if (ip.isEnumType(new_ty)) + return ip.get(gpa, .{ .enum_tag = .{ .ty = new_ty, .int = val, } }), + .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), + .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| { + const index = enum_type.nameIndex(ip, enum_literal).?; + return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = if (enum_type.values.len != 0) + enum_type.values[index] + else + try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = index }, + } }), + } }); + }, else => {}, }, - .enum_tag => |enum_tag| { - // Assume new_ty is an integer type. 
- return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty); - }, - .ptr => |ptr| switch (ip.indexToKey(new_ty)) { - .ptr_type => return ip.get(gpa, .{ .ptr = .{ + .ptr => |ptr| if (ip.isPointerType(new_ty)) + return ip.get(gpa, .{ .ptr = .{ .ty = new_ty, .addr = ptr.addr, + .len = ptr.len, } }), - else => {}, - }, - .err => |err| switch (ip.indexToKey(new_ty)) { - .error_set_type, .inferred_error_set_type => return ip.get(gpa, .{ .err = .{ + .err => |err| if (ip.isErrorSetType(new_ty)) + return ip.get(gpa, .{ .err = .{ .ty = new_ty, .name = err.name, + } }) + else if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = .{ .err_name = err.name }, + } }), + .error_union => |error_union| if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = error_union.val, } }), - else => {}, - }, else => {}, } switch (ip.indexToKey(new_ty)) { - .opt_type => |child_ty| switch (val) { + .opt_type => |child_type| switch (val) { .null_value => return ip.get(gpa, .{ .opt = .{ .ty = new_ty, .val = .none, } }), else => return ip.get(gpa, .{ .opt = .{ .ty = new_ty, - .val = try ip.getCoerced(gpa, val, child_ty), + .val = try ip.getCoerced(gpa, val, child_type), } }), }, + .error_union_type => |error_union_type| return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = .{ .payload = try ip.getCoerced(gpa, val, error_union_type.payload_type) }, + } }), else => {}, } if (std.debug.runtime_safety) { @@ -4271,33 +4288,24 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind // big_int storage, the limbs would be invalidated before they are read. // Here we pre-reserve the limbs to ensure that the logic in `addInt` will // not use an invalidated limbs pointer. - switch (int.storage) { - .u64 => |x| return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .u64 = x }, - } }), - .i64 => |x| return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .i64 = x }, - } }), - - .big_int => |big_int| { + const new_storage: Key.Int.Storage = switch (int.storage) { + .u64, .i64, .lazy_align, .lazy_size => int.storage, + .big_int => |big_int| storage: { const positive = big_int.positive; const limbs = ip.limbsSliceToIndex(big_int.limbs); // This line invalidates the limbs slice, but the indexes computed in the // previous line are still correct. 
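 // (After the reservation below, `limbsIndexToSlice` turns those stable indexes back into a valid slice.)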
try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); - return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .big_int = .{ - .limbs = ip.limbsIndexToSlice(limbs), - .positive = positive, - } }, - } }); + break :storage .{ .big_int = .{ + .limbs = ip.limbsIndexToSlice(limbs), + .positive = positive, + } }; }, - - .lazy_align, .lazy_size => unreachable, - } + }; + return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = new_storage, + } }); } pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex { @@ -4345,25 +4353,68 @@ pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.Inferre return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn isPointerType(ip: InternPool, ty: Index) bool { - const tags = ip.items.items(.tag); - if (ty == .none) return false; - return switch (tags[@enumToInt(ty)]) { - .type_pointer, .type_slice => true, - else => false, +/// includes .comptime_int_type +pub fn isIntegerType(ip: InternPool, ty: Index) bool { + return switch (ty) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .comptime_int_type, + => true, + else => ip.indexToKey(ty) == .int_type, + }; +} + +/// does not include .enum_literal_type +pub fn isEnumType(ip: InternPool, ty: Index) bool { + return switch (ty) { + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + => true, + else => ip.indexToKey(ty) == .enum_type, }; } +pub fn isFunctionType(ip: InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .func_type; +} + +pub fn isPointerType(ip: InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .ptr_type; +} + pub fn isOptionalType(ip: InternPool, ty: Index) bool { - const tags = ip.items.items(.tag); - if (ty == .none) return false; - return tags[@enumToInt(ty)] == .type_optional; + return ip.indexToKey(ty) == .opt_type; +} + +/// includes .inferred_error_set_type +pub fn isErrorSetType(ip: InternPool, ty: Index) bool { + return ty == .anyerror_type or switch (ip.indexToKey(ty)) { + .error_set_type, .inferred_error_set_type => true, + else => false, + }; } pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool { - const tags = ip.items.items(.tag); - assert(ty != .none); - return tags[@enumToInt(ty)] == .type_inferred_error_set; + return ip.indexToKey(ty) == .inferred_error_set_type; +} + +pub fn isErrorUnionType(ip: InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .error_union_type; } /// The is only legal because the initializer is not part of the hash. diff --git a/src/Module.zig b/src/Module.zig index 3dd89f126925..0e57a0fc06ba 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6699,6 +6699,11 @@ pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Inde return mod.intern_pool.get(mod.gpa, key); } +/// Shortcut for calling `intern_pool.getCoerced`. 
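+/// Takes and returns `Value`/`Type` wrappers so callers don't have to spell
+/// out the `toIntern`/`toValue` round trip at every call site.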
+pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value { + return (try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())).toValue(); +} + pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type { const i = try intern(mod, .{ .int_type = .{ .signedness = signedness, diff --git a/src/Sema.zig b/src/Sema.zig index 003481084628..51e58f2e7b78 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7821,7 +7821,6 @@ fn resolveGenericInstantiationType( const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst); const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable; const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; - errdefer mod.destroyFunc(new_func); assert(new_func == new_module_func); arg_i = 0; @@ -10793,7 +10792,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError check_range: { if (operand_ty.zigTypeTag(mod) == .Int) { const min_int = try operand_ty.minInt(mod); - const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int); + const max_int = try operand_ty.maxInt(mod, operand_ty); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -11649,7 +11648,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; const min = try ty.minInt(mod); - const max = try ty.maxIntScalar(mod, Type.comptime_int); + const max = try ty.maxInt(mod, ty); return RangeSetUnhandledIterator{ .sema = sema, @@ -15964,25 +15963,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } const args_val = v: { - const args_slice_ty = try mod.ptrType(.{ - .elem_type = param_info_ty.toIntern(), - .size = .Slice, - .is_const = true, + const new_decl_ty = try mod.arrayType(.{ + .len = param_vals.len, + .child = param_info_ty.toIntern(), }); const new_decl = try params_anon_decl.finish( - try mod.arrayType(.{ - .len = param_vals.len, - .child = param_info_ty.toIntern(), - .sentinel = .none, - }), + new_decl_ty, (try mod.intern(.{ .aggregate = .{ - .ty = args_slice_ty.toIntern(), + .ty = new_decl_ty.toIntern(), .storage = .{ .elems = param_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = args_slice_ty.toIntern(), + .ty = (try mod.ptrType(.{ + .elem_type = param_info_ty.toIntern(), + .size = .Slice, + .is_const = true, + })).toIntern(), .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(), } }); @@ -16214,7 +16212,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ .ty = type_info_ty.toIntern(), - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector))).toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ .ty = optional_field_ty.toIntern(), .storage = .{ .elems = &field_values }, @@ -16258,7 +16256,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, - .sentinel = .zero_u8, }); const new_decl = try anon_decl.finish( new_decl_ty, @@ -16269,8 +16266,9 @@ fn 
zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_sentinel_0_type, + .ty = .slice_const_u8_type, .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; @@ -16386,7 +16384,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, - .sentinel = .zero_u8, }); const new_decl = try anon_decl.finish( new_decl_ty, @@ -16397,8 +16394,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_sentinel_0_type, + .ty = .slice_const_u8_type, .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; @@ -16521,7 +16519,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, - .sentinel = .zero_u8, }); const new_decl = try anon_decl.finish( new_decl_ty, @@ -16532,8 +16529,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_sentinel_0_type, + .ty = .slice_const_u8_type, .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; @@ -16663,12 +16661,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |tuple| { struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); - for ( - tuple.types, - tuple.values, - struct_field_vals, - 0.., - ) |field_ty, field_val, *struct_field_val, i| { + for (struct_field_vals, 0..) 
|*struct_field_val, i| { + const anon_struct_type = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type; + const field_ty = anon_struct_type.types[i]; + const field_val = anon_struct_type.values[i]; const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16735,7 +16731,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, - .sentinel = .zero_u8, }); const new_decl = try anon_decl.finish( new_decl_ty, @@ -16746,7 +16741,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_sentinel_0_type, + .ty = .slice_const_u8_type, .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); @@ -16975,7 +16970,6 @@ fn typeInfoNamespaceDecls( const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, - .sentinel = .zero_u8, }); const new_decl = try anon_decl.finish( new_decl_ty, @@ -16986,7 +16980,7 @@ fn typeInfoNamespaceDecls( 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_sentinel_0_type, + .ty = .slice_const_u8_type, .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); @@ -20404,7 +20398,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .val = operand_val.toIntern(), } })).toValue()); } - return sema.addConstant(aligned_dest_ty, operand_val); + return sema.addConstant(aligned_dest_ty, try mod.getCoerced(operand_val, aligned_dest_ty)); } try sema.requireRuntimeBlock(block, src, null); @@ -22401,7 +22395,7 @@ fn analyzeMinMax( if (std.debug.runtime_safety) { assert(try sema.intFitsInType(val, refined_ty, null)); } - cur_minmax = try sema.addConstant(refined_ty, val); + cur_minmax = try sema.addConstant(refined_ty, try mod.getCoerced(val, refined_ty)); } break :refined refined_ty; @@ -22459,8 +22453,8 @@ fn analyzeMinMax( else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(mod, Type.comptime_int), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(mod, Type.comptime_int), + .min => try comptime_elem_ty.maxInt(mod, comptime_elem_ty), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(mod, unrefined_elem_ty), else => unreachable, }; @@ -23356,11 +23350,14 @@ fn zirBuiltinExtern( try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); try sema.ensureDeclAnalyzed(new_decl_index); - const ref = try mod.intern(.{ .ptr = .{ - .ty = (try mod.singleConstPtrType(ty)).toIntern(), + return sema.addConstant(ty, try mod.getCoerced((try mod.intern(.{ .ptr = .{ + .ty = switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => ty.toIntern(), + .opt_type => |child_type| child_type, + else => unreachable, + }, .addr = .{ .decl = new_decl_index }, - } }); - return sema.addConstant(ty, ref.toValue()); + } })).toValue(), ty)); } fn zirWorkItem( @@ -25887,13 +25884,7 @@ fn coerceExtra( var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (maybe_inst_val) |val| { - if (val.ip_index == .none) { - // Keep the comptime Value representation; take the new type. 
- return sema.addConstant(dest_ty, val);
- } else {
- const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern());
- return sema.addConstant(dest_ty, new_val.toValue());
- }
+ return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
 }
 try sema.requireRuntimeBlock(block, inst_src, null);
 return block.addBitCast(dest_ty, inst);
@@ -26269,8 +26260,7 @@ fn coerceExtra(
 if (!opts.report_err) return error.NotCoercible;
 return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) });
 }
- const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern());
- return try sema.addConstant(dest_ty, new_val.toValue());
+ return try sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
 }
 if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
 if (!opts.report_err) return error.NotCoercible;
@@ -27222,68 +27212,84 @@ fn coerceInMemoryAllowedFns(
 src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
 const mod = sema.mod;
- const dest_info = mod.typeToFunc(dest_ty).?;
- const src_info = mod.typeToFunc(src_ty).?;
- if (dest_info.is_var_args != src_info.is_var_args) {
- return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
- }
+ {
+ const dest_info = mod.typeToFunc(dest_ty).?;
+ const src_info = mod.typeToFunc(src_ty).?;
- if (dest_info.is_generic != src_info.is_generic) {
- return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
- }
+ if (dest_info.is_var_args != src_info.is_var_args) {
+ return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
+ }
- if (dest_info.cc != src_info.cc) {
- return InMemoryCoercionResult{ .fn_cc = .{
- .actual = src_info.cc,
- .wanted = dest_info.cc,
- } };
- }
+ if (dest_info.is_generic != src_info.is_generic) {
+ return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
+ }
- if (src_info.return_type != .noreturn_type) {
- const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type.toType(), src_info.return_type.toType(), false, target, dest_src, src_src);
- if (rt != .ok) {
- return InMemoryCoercionResult{ .fn_return_type = .{
- .child = try rt.dupe(sema.arena),
- .actual = src_info.return_type.toType(),
- .wanted = dest_info.return_type.toType(),
+ if (dest_info.cc != src_info.cc) {
+ return InMemoryCoercionResult{ .fn_cc = .{
+ .actual = src_info.cc,
+ .wanted = dest_info.cc,
 } };
 }
- }
- if (dest_info.param_types.len != src_info.param_types.len) {
- return InMemoryCoercionResult{ .fn_param_count = .{
- .actual = src_info.param_types.len,
- .wanted = dest_info.param_types.len,
- } };
+ if (src_info.return_type != .noreturn_type) {
+ const dest_return_type = dest_info.return_type.toType();
+ const src_return_type = src_info.return_type.toType();
+ const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src);
+ if (rt != .ok) {
+ return InMemoryCoercionResult{ .fn_return_type = .{
+ .child = try rt.dupe(sema.arena),
+ .actual = src_return_type,
+ .wanted = dest_return_type,
+ } };
+ }
+ }
 }
- if (dest_info.noalias_bits != src_info.noalias_bits) {
- return InMemoryCoercionResult{ .fn_param_noalias = .{
- .actual = src_info.noalias_bits,
- .wanted = dest_info.noalias_bits,
- } };
- }
+ const params_len = params_len: {
+ const dest_info = mod.typeToFunc(dest_ty).?;
+ const src_info = mod.typeToFunc(src_ty).?;
+
+ if (dest_info.param_types.len != src_info.param_types.len) {
+ return InMemoryCoercionResult{
.fn_param_count = .{ + .actual = src_info.param_types.len, + .wanted = dest_info.param_types.len, + } }; + } - for (dest_info.param_types, 0..) |dest_param_ty, i| { - const src_param_ty = src_info.param_types[i].toType(); + if (dest_info.noalias_bits != src_info.noalias_bits) { + return InMemoryCoercionResult{ .fn_param_noalias = .{ + .actual = src_info.noalias_bits, + .wanted = dest_info.noalias_bits, + } }; + } + + break :params_len dest_info.param_types.len; + }; + + for (0..params_len) |param_i| { + const dest_info = mod.typeToFunc(dest_ty).?; + const src_info = mod.typeToFunc(src_ty).?; - const i_small = @intCast(u5, i); - if (dest_info.paramIsComptime(i_small) != src_info.paramIsComptime(i_small)) { + const dest_param_ty = dest_info.param_types[param_i].toType(); + const src_param_ty = src_info.param_types[param_i].toType(); + + const param_i_small = @intCast(u5, param_i); + if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) { return InMemoryCoercionResult{ .fn_param_comptime = .{ - .index = i, - .wanted = dest_info.paramIsComptime(i_small), + .index = param_i, + .wanted = dest_info.paramIsComptime(param_i_small), } }; } // Note: Cast direction is reversed here. - const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty.toType(), false, target, dest_src, src_src); + const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src); if (param != .ok) { return InMemoryCoercionResult{ .fn_param = .{ .child = try param.dupe(sema.arena), .actual = src_param_ty, - .wanted = dest_param_ty.toType(), - .index = i, + .wanted = dest_param_ty, + .index = param_i, } }; } } @@ -28385,7 +28391,7 @@ fn beginComptimePtrLoad( }; }, .elem => |elem_ptr| blk: { - const elem_ty = ptr.ty.toType().childType(mod); + const elem_ty = ptr.ty.toType().elemType2(mod); var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); // This code assumes that elem_ptrs have been "flattened" in order for direct dereference @@ -28678,11 +28684,10 @@ fn coerceCompatiblePtrs( return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } // The comptime Value representation is compatible with both types. 
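 // `getCoerced` simply re-interns the same pointer data under the destination type.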
- return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced( - sema.gpa, - try val.intern(inst_ty, mod), - dest_ty.toIntern(), - )).toValue()); + return sema.addConstant( + dest_ty, + try mod.getCoerced((try val.intern(inst_ty, mod)).toValue(), dest_ty), + ); } try sema.requireRuntimeBlock(block, inst_src, null); const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod); @@ -29390,9 +29395,13 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { const mod = sema.mod; + const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque); return (try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType((try mod.singleConstPtrType(Type.anyopaque)).toIntern())).toIntern(), - .val = if (opt_val) |val| (try sema.refValue(block, ty, val)).toIntern() else .none, + .ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(), + .val = if (opt_val) |val| (try mod.getCoerced( + try sema.refValue(block, ty, val), + ptr_anyopaque_ty, + )).toIntern() else .none, } })).toValue(); } @@ -30051,11 +30060,10 @@ fn analyzeSlice( }; if (!new_ptr_val.isUndef(mod)) { - return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced( - sema.gpa, - try new_ptr_val.intern(new_ptr_ty, mod), - return_ty.toIntern(), - )).toValue()); + return sema.addConstant(return_ty, try mod.getCoerced( + (try new_ptr_val.intern(new_ptr_ty, mod)).toValue(), + return_ty, + )); } // Special case: @as([]i32, undefined)[x..x] @@ -34237,9 +34245,9 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { // The `tagValueIndex` function call below relies on the type being the integer tag type. // `getCoerced` assumes the value will fit the new type. 
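 // A value outside the tag type's range can never match any tag, hence the early `return false` below.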
if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false; - const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.toIntern(), enum_type.tag_ty); + const int_coerced = try mod.getCoerced(int, enum_type.tag_ty.toType()); - return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null; + return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null; } fn intAddWithOverflow( diff --git a/src/codegen.zig b/src/codegen.zig index 87aea6c245b2..6dbf3f847a21 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -185,7 +185,7 @@ pub fn generateSymbol( const mod = bin_file.options.module.?; var typed_value = arg_tv; - switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { .runtime_value => |rt| typed_value.val = rt.val.toValue(), else => {}, } @@ -204,7 +204,7 @@ pub fn generateSymbol( return .ok; } - switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -282,7 +282,7 @@ pub fn generateSymbol( switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_ty, .val = switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }), .payload => |payload| payload, }.toValue(), }, code, debug_output, reloc_info)) { @@ -315,7 +315,7 @@ pub fn generateSymbol( const int_tag_ty = try typed_value.ty.intTagType(mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = int_tag_ty, - .val = (try mod.intern_pool.getCoerced(mod.gpa, enum_tag.int, int_tag_ty.ip_index)).toValue(), + .val = try mod.getCoerced(enum_tag.int.toValue(), int_tag_ty), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return .{ .fail = em }, @@ -337,7 +337,7 @@ pub fn generateSymbol( switch (try lowerParentPtr(bin_file, src_loc, switch (ptr.len) { .none => typed_value.val, else => typed_value.val.slicePtr(mod), - }.ip_index, code, debug_output, reloc_info)) { + }.toIntern(), code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return .{ .fail = em }, } @@ -372,7 +372,7 @@ pub fn generateSymbol( } else { const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; if (payload_type.hasRuntimeBits(mod)) { - const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.ip_index })).toValue(); + const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.toIntern() })).toValue(); switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, .val = value, @@ -385,7 +385,7 @@ pub fn generateSymbol( try code.writer().writeByteNTimes(0, padding); } }, - .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.ip_index)) { + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.toIntern())) { .array_type => |array_type| { var index: u64 = 0; while (index < array_type.len) : (index += 1) { @@ -850,7 +850,7 @@ pub fn genTypedValue( ) CodeGenError!GenResult { const mod = bin_file.options.module.?; var typed_value = arg_tv; - switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { .runtime_value => |rt| typed_value.val = rt.val.toValue(), else => {}, } @@ -866,7 +866,7 @@ pub fn genTypedValue( const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); - if 
(!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| return genDeclRef(bin_file, src_loc, typed_value, decl), .mut_decl => |mut_decl| return genDeclRef(bin_file, src_loc, typed_value, mut_decl.decl), @@ -879,12 +879,12 @@ pub fn genTypedValue( .Void => return GenResult.mcv(.none), .Pointer => switch (typed_value.ty.ptrSize(mod)) { .Slice => {}, - else => switch (typed_value.val.ip_index) { + else => switch (typed_value.val.toIntern()) { .null_value => { return GenResult.mcv(.{ .immediate = 0 }); }, .none => {}, - else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { .int => { return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, @@ -916,7 +916,7 @@ pub fn genTypedValue( } }, .Enum => { - const enum_tag = mod.intern_pool.indexToKey(typed_value.val.ip_index).enum_tag; + const enum_tag = mod.intern_pool.indexToKey(typed_value.val.toIntern()).enum_tag; const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); return genTypedValue(bin_file, src_loc, .{ .ty = int_tag_ty.toType(), @@ -924,7 +924,9 @@ pub fn genTypedValue( }, owner_decl_index); }, .ErrorSet => { - const err_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(typed_value.val.ip_index).err.name); + const err_name = mod.intern_pool.stringToSlice( + mod.intern_pool.indexToKey(typed_value.val.toIntern()).err.name, + ); const global_error_set = mod.global_error_set; const error_index = global_error_set.get(err_name).?; return GenResult.mcv(.{ .immediate = error_index }); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index dd07b5edbd09..d4b7e8eb3d06 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2329,7 +2329,7 @@ pub const Object = struct { try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } - for (fn_info.param_types) |param_ty| { + for (mod.typeToFunc(ty).?.param_types) |param_ty| { if (!param_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; if (isByRef(param_ty.toType(), mod)) { diff --git a/src/type.zig b/src/type.zig index 21e7ced3f496..5d6f77adf2e6 100644 --- a/src/type.zig +++ b/src/type.zig @@ -23,7 +23,7 @@ pub const Type = struct { } pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => .Int, .ptr_type => .Pointer, .array_type => .Array, @@ -170,7 +170,7 @@ pub const Type = struct { /// Asserts the type is a pointer. pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { - return !mod.intern_pool.indexToKey(ty.ip_index).ptr_type.is_const; + return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.is_const; } pub const ArrayInfo = struct { @@ -199,26 +199,23 @@ pub const Type = struct { } pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { - return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.ip_index)); + return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.toIntern())); } pub fn eql(a: Type, b: Type, mod: *const Module) bool { _ = mod; // TODO: remove this parameter - assert(a.ip_index != .none); - assert(b.ip_index != .none); // The InternPool data structure hashes based on Key to make interned objects // unique. 
An Index can be treated simply as u32 value for the // purpose of Type/Value hashing and equality. - return a.ip_index == b.ip_index; + return a.toIntern() == b.toIntern(); } pub fn hash(ty: Type, mod: *const Module) u32 { _ = mod; // TODO: remove this parameter - assert(ty.ip_index != .none); // The InternPool data structure hashes based on Key to make interned objects // unique. An Index can be treated simply as u32 value for the // purpose of Type/Value hashing and equality. - return std.hash.uint32(@enumToInt(ty.ip_index)); + return std.hash.uint32(@enumToInt(ty.toIntern())); } pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { @@ -280,7 +277,7 @@ pub const Type = struct { /// Prints a name suitable for `@typeName`. pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| { const sign_char: u8 = switch (int_type.signedness) { .signed => 'i', @@ -520,10 +517,10 @@ pub const Type = struct { ignore_comptime_only: bool, strat: AbiAlignmentAdvancedStrat, ) RuntimeBitsError!bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { // False because it is a comptime-only type. .empty_struct_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| int_type.bits != 0, .ptr_type => |ptr_type| { // Pointers to zero-bit types still have a runtime address; however, pointers @@ -710,7 +707,7 @@ pub const Type = struct { /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type, .ptr_type, .vector_type, @@ -847,7 +844,7 @@ pub const Type = struct { } pub fn isNoReturn(ty: Type, mod: *Module) bool { - return if (ty.ip_index != .none) mod.intern_pool.isNoReturn(ty.ip_index) else false; + return mod.intern_pool.isNoReturn(ty.toIntern()); } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. 
@@ -856,7 +853,7 @@ pub const Type = struct { } pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| { if (ptr_type.alignment.toByteUnitsOptional()) |a| { return @intCast(u32, a); @@ -873,7 +870,7 @@ pub const Type = struct { } pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| ptr_type.address_space, .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space, else => unreachable, @@ -923,9 +920,9 @@ pub const Type = struct { else => null, }; - switch (ty.ip_index) { + switch (ty.toIntern()) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| { if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; @@ -1040,7 +1037,7 @@ pub const Type = struct { .sema => unreachable, // handled above .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }; if (struct_obj.layout == .Packed) { @@ -1048,7 +1045,7 @@ pub const Type = struct { .sema => |sema| try sema.resolveTypeLayout(ty), .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, .eager => {}, } @@ -1062,7 +1059,7 @@ pub const Type = struct { if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, else => |e| return e, })) continue; @@ -1076,7 +1073,7 @@ pub const Type = struct { .sema => unreachable, // handled above .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }, }; @@ -1106,7 +1103,7 @@ pub const Type = struct { .sema => unreachable, // passed to abiAlignmentAdvanced above .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }, } @@ -1157,7 +1154,7 @@ pub const Type = struct { if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, else => |e| return e, })) { @@ -1179,7 +1176,7 @@ pub const Type = struct { } return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }; }, } @@ -1205,7 +1202,7 @@ pub const Type = struct { if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch 
|err| switch (err) { error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, else => |e| return e, })) { @@ -1217,7 +1214,7 @@ pub const Type = struct { .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, .val => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }, } @@ -1249,7 +1246,7 @@ pub const Type = struct { .sema => unreachable, // handled above .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }; if (union_obj.fields.count() == 0) { @@ -1266,7 +1263,7 @@ pub const Type = struct { if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, else => |e| return e, })) continue; @@ -1280,7 +1277,7 @@ pub const Type = struct { .sema => unreachable, // handled above .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_align = ty.ip_index }, + .storage = .{ .lazy_align = ty.toIntern() }, } })).toValue() }, }, }; @@ -1321,10 +1318,10 @@ pub const Type = struct { ) Module.CompileError!AbiSizeAdvanced { const target = mod.getTarget(); - switch (ty.ip_index) { + switch (ty.toIntern()) { .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| { if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; @@ -1343,7 +1340,7 @@ pub const Type = struct { .sema, .eager => unreachable, .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, }, } @@ -1354,7 +1351,7 @@ pub const Type = struct { .eager => null, .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, }; const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); @@ -1365,7 +1362,7 @@ pub const Type = struct { .scalar => |x| x, .val => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, }; const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); @@ -1385,7 +1382,7 @@ pub const Type = struct { if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, else => |e| return e, })) { @@ -1401,7 +1398,7 @@ pub const Type = struct { .eager => unreachable, .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index 
}, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, }, }; @@ -1489,7 +1486,7 @@ pub const Type = struct { .sema => |sema| try sema.resolveTypeLayout(ty), .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, .eager => {}, } @@ -1504,7 +1501,7 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = 0 }; if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }; }, .eager => {}, @@ -1568,7 +1565,7 @@ pub const Type = struct { .sema => |sema| try sema.resolveTypeLayout(ty), .lazy => if (!union_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, .eager => {}, } @@ -1589,7 +1586,7 @@ pub const Type = struct { if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, else => |e| return e, })) return AbiSizeAdvanced{ .scalar = 1 }; @@ -1605,7 +1602,7 @@ pub const Type = struct { .eager => unreachable, .lazy => return .{ .val = (try mod.intern(.{ .int = .{ .ty = .comptime_int_type, - .storage = .{ .lazy_size = ty.ip_index }, + .storage = .{ .lazy_size = ty.toIntern() }, } })).toValue() }, }, }; @@ -1647,7 +1644,7 @@ pub const Type = struct { const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| return int_type.bits, .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => return target.ptrBitWidth() * 2, @@ -1820,7 +1817,7 @@ pub const Type = struct { } pub fn isSinglePointer(ty: Type, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_info| ptr_info.size == .One, else => false, }; @@ -1833,33 +1830,27 @@ pub const Type = struct { /// Returns `null` if `ty` is not a pointer. 
pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_info| ptr_info.size, else => null, }; } pub fn isSlice(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.size == .Slice, - else => false, - }, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.size == .Slice, + else => false, }; } pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { - return mod.intern_pool.slicePtrType(ty.ip_index).toType(); + return mod.intern_pool.slicePtrType(ty.toIntern()).toType(); } pub fn isConstPtr(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => false, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.is_const, - else => false, - }, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.is_const, + else => false, }; } @@ -1868,53 +1859,41 @@ pub const Type = struct { } pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (ip.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.is_volatile, - else => false, - }, + return switch (ip.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.is_volatile, + else => false, }; } pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.is_allowzero, - .opt_type => true, - else => false, - }, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.is_allowzero, + .opt_type => true, + else => false, }; } pub fn isCPtr(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.size == .C, - else => false, - }, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.size == .C, + else => false, }; } pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice => false, - .One, .Many, .C => true, - }, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| switch (p.size) { - .Slice, .C => false, - .Many, .One => !p.is_allowzero, - }, - else => false, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => false, + .One, .Many, .C => true, + }, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| switch (p.size) { + .Slice, .C => false, + .Many, .One => !p.is_allowzero, }, else => false, }, + else => false, }; } @@ -1929,22 +1908,19 @@ pub const Type = struct { /// See also `isPtrLikeOptional`. 
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { - .Pointer => { - const info = child.toType().ptrInfo(mod); - return switch (info.size) { - .C => false, - else => !info.@"allowzero", - }; - }, - .ErrorSet => true, - else => false, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { + .Pointer => { + const info = child.toType().ptrInfo(mod); + return switch (info.size) { + .C => false, + else => !info.@"allowzero", + }; }, + .ErrorSet => true, else => false, }, + else => false, }; } @@ -1952,19 +1928,16 @@ pub const Type = struct { /// address value, using 0 for null. Note that this returns true for C pointers. /// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.size == .C, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice, .C => false, - .Many, .One => !ptr_type.is_allowzero, - }, - else => false, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| ptr_type.size == .C, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice, .C => false, + .Many, .One => !ptr_type.is_allowzero, }, else => false, }, + else => false, }; } @@ -1976,7 +1949,7 @@ pub const Type = struct { } pub fn childTypeIp(ty: Type, ip: InternPool) Type { - return ip.childType(ty.ip_index).toType(); + return ip.childType(ty.toIntern()).toType(); } /// For *[N]T, returns T. @@ -1989,7 +1962,7 @@ pub const Type = struct { /// For []T, returns T. /// For anyframe->T, returns T. pub fn elemType2(ty: Type, mod: *const Module) Type { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.size) { .One => ptr_type.elem_type.toType().shallowElemType(mod), .Many, .C, .Slice => ptr_type.elem_type.toType(), @@ -2023,7 +1996,7 @@ pub const Type = struct { /// Asserts that the type is an optional. /// Note that for C pointers this returns the type unmodified. pub fn optionalChild(ty: Type, mod: *const Module) Type { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .opt_type => |child| child.toType(), .ptr_type => |ptr_type| b: { assert(ptr_type.size == .C); @@ -2036,7 +2009,7 @@ pub const Type = struct { /// Returns the tag type of a union, if the type is a union and it has a tag type. /// Otherwise, returns `null`. pub fn unionTagType(ty: Type, mod: *Module) ?Type { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .union_type => |union_type| switch (union_type.runtime_tag) { .tagged => { const union_obj = mod.unionPtr(union_type.index); @@ -2052,7 +2025,7 @@ pub const Type = struct { /// Same as `unionTagType` but includes safety tag. /// Codegen should use this version. 
pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .union_type => |union_type| { if (!union_type.hasTag()) return null; const union_obj = mod.unionPtr(union_type.index); @@ -2097,13 +2070,13 @@ pub const Type = struct { } pub fn unionGetLayout(ty: Type, mod: *Module) Module.Union.Layout { - const union_type = mod.intern_pool.indexToKey(ty.ip_index).union_type; + const union_type = mod.intern_pool.indexToKey(ty.toIntern()).union_type; const union_obj = mod.unionPtr(union_type.index); return union_obj.getLayout(mod, union_type.hasTag()); } pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; return struct_obj.layout; @@ -2119,19 +2092,19 @@ pub const Type = struct { /// Asserts that the type is an error union. pub fn errorUnionPayload(ty: Type, mod: *Module) Type { - return mod.intern_pool.indexToKey(ty.ip_index).error_union_type.payload_type.toType(); + return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type.toType(); } /// Asserts that the type is an error union. pub fn errorUnionSet(ty: Type, mod: *Module) Type { - return mod.intern_pool.indexToKey(ty.ip_index).error_union_type.error_set_type.toType(); + return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.error_set_type.toType(); } /// Returns false for unresolved inferred error sets. pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .anyerror_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .error_set_type => |error_set_type| error_set_type.names.len == 0, .inferred_error_set_type => |index| { const inferred_error_set = mod.inferredErrorSetPtr(index); @@ -2149,9 +2122,9 @@ pub const Type = struct { /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. pub fn isAnyError(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .anyerror_type => true, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror, else => false, }, @@ -2194,9 +2167,9 @@ pub const Type = struct { /// resolved yet. pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { const ip = &mod.intern_pool; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .anyerror_type => true, - else => switch (ip.indexToKey(ty.ip_index)) { + else => switch (ip.indexToKey(ty.toIntern())) { .error_set_type => |error_set_type| { // If the string is not interned, then the field certainly is not present. 
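 // (Names stored in an error set are interned when the set is created, so a miss in the string table is definitive.)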
const field_name_interned = ip.getString(name).unwrap() orelse return false; @@ -2220,7 +2193,7 @@ pub const Type = struct { } pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { - return switch (ip.indexToKey(ty.ip_index)) { + return switch (ip.indexToKey(ty.toIntern())) { .vector_type => |vector_type| vector_type.len, .array_type => |array_type| array_type.len, .struct_type => |struct_type| { @@ -2238,7 +2211,7 @@ pub const Type = struct { } pub fn vectorLen(ty: Type, mod: *const Module) u32 { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .vector_type => |vector_type| vector_type.len, .anon_struct_type => |tuple| @intCast(u32, tuple.types.len), else => unreachable, @@ -2247,7 +2220,7 @@ pub const Type = struct { /// Asserts the type is an array, pointer or vector. pub fn sentinel(ty: Type, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .vector_type, .struct_type, .anon_struct_type, @@ -2267,10 +2240,9 @@ pub const Type = struct { /// Returns true if and only if the type is a fixed-width, signed integer. pub fn isSignedInt(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .c_char_type, .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| int_type.signedness == .signed, else => false, }, @@ -2279,10 +2251,9 @@ pub const Type = struct { /// Returns true if and only if the type is a fixed-width, unsigned integer. pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| int_type.signedness == .unsigned, else => false, }, @@ -2304,7 +2275,7 @@ pub const Type = struct { const target = mod.getTarget(); var ty = starting_ty; - while (true) switch (ty.ip_index) { + while (true) switch (ty.toIntern()) { .anyerror_type => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; @@ -2320,7 +2291,7 @@ pub const Type = struct { .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| return int_type, .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; @@ -2370,7 +2341,7 @@ pub const Type = struct { } pub fn isNamedInt(ty: Type) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .usize_type, .isize_type, .c_char_type, @@ -2390,7 +2361,7 @@ pub const Type = struct { /// Returns `false` for `comptime_float`. 
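 /// That is, only the fixed-width float types (`f16` through `f128` and `c_longdouble`) return `true`.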
pub fn isRuntimeFloat(ty: Type) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .f16_type, .f32_type, .f64_type, @@ -2405,7 +2376,7 @@ pub const Type = struct { /// Returns `true` for `comptime_float`. pub fn isAnyFloat(ty: Type) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .f16_type, .f32_type, .f64_type, @@ -2422,7 +2393,7 @@ pub const Type = struct { /// Asserts the type is a fixed-size float or comptime_float. /// Returns 128 for comptime_float types. pub fn floatBits(ty: Type, target: Target) u16 { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .f16_type => 16, .f32_type => 32, .f64_type => 64, @@ -2440,7 +2411,7 @@ pub const Type = struct { } pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type { - return switch (ip.indexToKey(ty.ip_index)) { + return switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type, .func_type => |func_type| func_type.return_type, else => unreachable, @@ -2449,7 +2420,7 @@ pub const Type = struct { /// Asserts the type is a function. pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention { - return mod.intern_pool.indexToKey(ty.ip_index).func_type.cc; + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.cc; } pub fn isValidParamType(self: Type, mod: *const Module) bool { @@ -2468,11 +2439,11 @@ pub const Type = struct { /// Asserts the type is a function. pub fn fnIsVarArgs(ty: Type, mod: *Module) bool { - return mod.intern_pool.indexToKey(ty.ip_index).func_type.is_var_args; + return mod.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args; } pub fn isNumeric(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .f16_type, .f32_type, .f64_type, @@ -2494,9 +2465,7 @@ pub const Type = struct { .c_ulonglong_type, => true, - .none => false, - - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => true, else => false, }, @@ -2508,10 +2477,10 @@ pub const Type = struct { pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value { var ty = starting_type; - while (true) switch (ty.ip_index) { + while (true) switch (ty.toIntern()) { .empty_struct_type => return Value.empty_struct, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| { if (int_type.bits == 0) { return try mod.intValue(ty, 0); @@ -2530,13 +2499,13 @@ pub const Type = struct { inline .array_type, .vector_type => |seq_type| { if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue(); if (try seq_type.child.toType().onePossibleValue(mod)) |opv| { return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = opv.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, } })).toValue(); } return null; @@ -2612,7 +2581,7 @@ pub const Type = struct { // This TODO is repeated in the redundant implementation of // one-possible-value logic in Sema.zig. const empty = try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } }); return empty.toValue(); @@ -2625,7 +2594,7 @@ pub const Type = struct { // In this case the struct has all comptime-known fields and // therefore has one possible value. 
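                    // (e.g. the type of `.{@as(u32, 5)}` is a tuple whose single
                    // field is comptime-known, so the aggregate of the stored field
                    // values is the type's one possible value.)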
return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = tuple.values }, } })).toValue(); }, @@ -2637,9 +2606,9 @@ pub const Type = struct { const only_field = union_obj.fields.values()[0]; const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; const only = try mod.intern(.{ .un = .{ - .ty = ty.ip_index, - .tag = tag_val.ip_index, - .val = val_val.ip_index, + .ty = ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), } }); return only.toValue(); }, @@ -2650,8 +2619,8 @@ pub const Type = struct { if (try enum_type.tag_ty.toType().onePossibleValue(mod)) |int_opv| { const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.ip_index, - .int = int_opv.ip_index, + .ty = ty.toIntern(), + .int = int_opv.toIntern(), } }); return only.toValue(); } @@ -2663,7 +2632,7 @@ pub const Type = struct { 1 => { if (enum_type.values.len == 0) { const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = try mod.intern(.{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = 0 }, @@ -2705,10 +2674,10 @@ pub const Type = struct { /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. pub fn comptimeOnly(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => false, .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); @@ -2880,8 +2849,7 @@ pub const Type = struct { /// Returns null if the type has no namespace. pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex { - if (ty.ip_index == .none) return .none; - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), .struct_type => |struct_type| struct_type.namespace, .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), @@ -2900,8 +2868,8 @@ pub const Type = struct { pub fn minInt(ty: Type, mod: *Module) !Value { const scalar = try minIntScalar(ty.scalarType(mod), mod); return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = scalar.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = scalar.toIntern() }, } })).toValue() else scalar; } @@ -2929,8 +2897,8 @@ pub const Type = struct { pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty); return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = scalar.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = scalar.toIntern() }, } })).toValue() else scalar; } @@ -2971,7 +2939,7 @@ pub const Type = struct { /// Asserts the type is an enum or a union. 
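    /// For a union, this is the integer type backing its tag enum.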
pub fn intTagType(ty: Type, mod: *Module) !Type { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod), .enum_type => |enum_type| enum_type.tag_ty.toType(), else => unreachable, @@ -2979,21 +2947,18 @@ pub const Type = struct { } pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .enum_type => |enum_type| switch (enum_type.tag_mode) { - .nonexhaustive => true, - .auto, .explicit => false, - }, - else => false, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => true, + .auto, .explicit => false, }, + else => false, }; } // Asserts that `ty` is an error set and not `anyerror`. pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .error_set_type => |x| x.names, .inferred_error_set_type => |index| { const inferred_error_set = mod.inferredErrorSetPtr(index); @@ -3006,22 +2971,22 @@ pub const Type = struct { } pub fn enumFields(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { - return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names; + return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names; } pub fn enumFieldCount(ty: Type, mod: *Module) usize { - return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names.len; + return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names.len; } pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) [:0]const u8 { const ip = &mod.intern_pool; - const field_name = ip.indexToKey(ty.ip_index).enum_type.names[field_index]; + const field_name = ip.indexToKey(ty.toIntern()).enum_type.names[field_index]; return ip.stringToSlice(field_name); } pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?u32 { const ip = &mod.intern_pool; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; // If the string is not interned, then the field certainly is not present. const field_name_interned = ip.getString(field_name).unwrap() orelse return null; return enum_type.nameIndex(ip, field_name_interned); @@ -3032,9 +2997,9 @@ pub const Type = struct { /// declaration order, or `null` if `enum_tag` does not match any field. 
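    /// (e.g. for `enum(u8) { a = 10, b = 20 }`, a tag whose integer value is 20
    /// maps to field index 1.)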
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { const ip = &mod.intern_pool; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; - const int_tag = switch (ip.indexToKey(enum_tag.ip_index)) { - .int => enum_tag.ip_index, + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) { + .int => enum_tag.toIntern(), .enum_tag => |info| info.int, else => unreachable, }; @@ -3043,7 +3008,7 @@ pub const Type = struct { } pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{}; assert(struct_obj.haveFieldTypes()); @@ -3054,7 +3019,7 @@ pub const Type = struct { } pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.haveFieldTypes()); @@ -3069,7 +3034,7 @@ pub const Type = struct { } pub fn structFieldCount(ty: Type, mod: *Module) usize { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; assert(struct_obj.haveFieldTypes()); @@ -3082,7 +3047,7 @@ pub const Type = struct { /// Supports structs and unions. pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; return struct_obj.fields.values()[index].ty; @@ -3097,7 +3062,7 @@ pub const Type = struct { } pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.layout != .Packed); @@ -3115,7 +3080,7 @@ pub const Type = struct { } pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; return struct_obj.fields.values()[index].default_val; @@ -3131,7 +3096,7 @@ pub const Type = struct { } pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; const field = struct_obj.fields.values()[index]; @@ -3154,7 +3119,7 @@ pub const Type = struct { } pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.layout == .Packed) return false; @@ -3167,7 +3132,7 @@ pub const Type = struct { } pub fn 
packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 { - const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type; + const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type; const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.layout == .Packed); comptime assert(Type.packed_struct_layout_version == 2); @@ -3229,7 +3194,7 @@ pub const Type = struct { /// Get an iterator that iterates over all the struct field, returning the field and /// offset of that field. Asserts that the type is a non-packed struct. pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator { - const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type; + const struct_type = mod.intern_pool.indexToKey(ty.toIntern()).struct_type; const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.haveLayout()); assert(struct_obj.layout != .Packed); @@ -3238,7 +3203,7 @@ pub const Type = struct { /// Supports structs and unions. pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.haveLayout()); @@ -3296,7 +3261,7 @@ pub const Type = struct { } pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; return struct_obj.srcLoc(mod); @@ -3316,7 +3281,7 @@ pub const Type = struct { } pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null; return struct_obj.owner_decl; @@ -3332,33 +3297,30 @@ pub const Type = struct { } pub fn isGenericPoison(ty: Type) bool { - return ty.ip_index == .generic_poison_type; + return ty.toIntern() == .generic_poison_type; } pub fn isTuple(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { - .none => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; - return struct_obj.is_tuple; - }, - .anon_struct_type => |anon_struct| anon_struct.names.len == 0, - else => false, + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + return struct_obj.is_tuple; }, + .anon_struct_type => |anon_struct| anon_struct.names.len == 0, + else => false, }; } pub fn isAnonStruct(ty: Type, mod: *Module) bool { - if (ty.ip_index == .empty_struct_type) return true; - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + if (ty.toIntern() == .empty_struct_type) return true; + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, else => false, }; } pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const 
struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; return struct_obj.is_tuple; @@ -3369,14 +3331,14 @@ pub const Type = struct { } pub fn isSimpleTuple(ty: Type, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, else => false, }; } pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => true, else => false, }; diff --git a/src/value.zig b/src/value.zig index bf201aceaf6e..473b1c967cc7 100644 --- a/src/value.zig +++ b/src/value.zig @@ -345,7 +345,7 @@ pub const Value = struct { } pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { - if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), ty.toIntern()); + if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern(); switch (val.tag()) { .eu_payload => { const pl = val.castTag(.eu_payload).?.data; @@ -506,11 +506,7 @@ pub const Value = struct { else => unreachable, }; }, - .enum_type => |enum_type| (try ip.getCoerced( - mod.gpa, - val.toIntern(), - enum_type.tag_ty, - )).toValue(), + .enum_type => |enum_type| try mod.getCoerced(val, enum_type.tag_ty.toType()), else => unreachable, }; } @@ -872,10 +868,15 @@ pub const Value = struct { .Packed => { var bits: u16 = 0; const fields = ty.structFields(mod).values(); - const field_vals = val.castTag(.aggregate).?.data; + const storage = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage; for (fields, 0..) |field, i| { const field_bits = @intCast(u16, field.ty.bitSize(mod)); - try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits); + const field_val = switch (storage) { + .bytes => unreachable, + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }; + try field_val.toValue().writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits); bits += field_bits; } }, @@ -2006,23 +2007,30 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { + return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => false, + else => val.isPtrToThreadLocalInner(mod), + }; + } + + pub fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool { return val.ip_index != .none and switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable.is_threadlocal, .ptr => |ptr| switch (ptr.addr) { .decl => |decl_index| { const decl = mod.declPtr(decl_index); assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); + return decl.val.isPtrToThreadLocalInner(mod); }, .mut_decl => |mut_decl| { const decl = mod.declPtr(mut_decl.decl); assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); + return decl.val.isPtrToThreadLocalInner(mod); }, .int => false, - .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), - .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), - .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocalInner(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocalInner(mod), + .elem, .field => |base_index| 
base_index.base.toValue().isPtrToThreadLocalInner(mod), }, else => false, }; @@ -2045,7 +2053,18 @@ pub const Value = struct { else => unreachable, }, .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ - .ty = mod.intern_pool.typeOf(val.toIntern()), + .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { + .array_type => |array_type| try mod.arrayType(.{ + .len = @intCast(u32, end - start), + .child = array_type.child, + .sentinel = if (end == array_type.len) array_type.sentinel else .none, + }), + .vector_type => |vector_type| try mod.vectorType(.{ + .len = @intCast(u32, end - start), + .child = vector_type.child, + }), + else => unreachable, + }.toIntern(), .storage = switch (aggregate.storage) { .bytes => |bytes| .{ .bytes = bytes[start..end] }, .elems => |elems| .{ .elems = elems[start..end] }, diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 740857083065..62a0f3fefc91 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -347,9 +347,15 @@ def get_child_index(self, name): except: return -1 def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None -def Inst_Ref_SummaryProvider(value, _=None): +def Zir_Inst__Zir_Inst_Ref_SummaryProvider(value, _=None): members = value.type.enum_members - return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned - len(members)) + # ignore .var_args_param_type and .none + return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 2 - len(members)) + +def Air_Inst__Air_Inst_Ref_SummaryProvider(value, _=None): + members = value.type.enum_members + # ignore .none + return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 1 - len(members)) class Module_Decl__Module_Decl_Index_SynthProvider: def __init__(self, value, _=None): self.value = value @@ -676,8 +682,9 @@ def __lldb_init_module(debugger, _=None): add(debugger, category='zig.stage2', type='Zir.Inst', identifier='TagAndPayload', synth=True, inline_children=True, summary=True) add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Zir\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True) add(debugger, category='zig.stage2', regex=True, type='^Zir\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True) - add(debugger, category='zig.stage2', type='Zir.Inst::Zir.Inst.Ref', identifier='Inst_Ref', summary=True) + add(debugger, category='zig.stage2', type='Zir.Inst::Zir.Inst.Ref', summary=True) add(debugger, category='zig.stage2', type='Air.Inst', identifier='TagAndPayload', synth=True, inline_children=True, summary=True) + add(debugger, category='zig.stage2', type='Air.Inst::Air.Inst.Ref', summary=True) add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True) add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True) add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True) From abded5cbb0b8c0b383b2362a2b89447528fe536c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 26 May 2023 14:58:07 -0400 Subject: [PATCH 105/205] TypedValue: implement more prints --- src/Module.zig | 38 
+++++++---- src/TypedValue.zig | 148 ++++++++++++++++++++++++++++++------------- src/codegen/llvm.zig | 4 +- 3 files changed, 131 insertions(+), 59 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 0e57a0fc06ba..f78f53300615 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6705,18 +6705,13 @@ pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value } pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type { - const i = try intern(mod, .{ .int_type = .{ + return (try intern(mod, .{ .int_type = .{ .signedness = signedness, .bits = bits, - } }); - return i.toType(); + } })).toType(); } pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type { - if (std.debug.runtime_safety and info.sentinel != .none) { - const sent_ty = mod.intern_pool.typeOf(info.sentinel); - assert(sent_ty == info.child); - } const i = try intern(mod, .{ .array_type = info }); return i.toType(); } @@ -6732,12 +6727,31 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error! } pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { - if (std.debug.runtime_safety and info.sentinel != .none) { - const sent_ty = mod.intern_pool.typeOf(info.sentinel); - assert(sent_ty == info.elem_type); + var canon_info = info; + + // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee + // type, we change it to 0 here. If this causes an assertion trip because the + // pointee type needs to be resolved more, that needs to be done before calling + // this ptr() function. + if (info.alignment.toByteUnitsOptional()) |info_align| { + const elem_align = info.elem_type.toType().abiAlignment(mod); + if (info.elem_type.toType().layoutIsResolved(mod) and info_align == elem_align) { + canon_info.alignment = .none; + } } - const i = try intern(mod, .{ .ptr_type = info }); - return i.toType(); + + // Canonicalize host_size. If it matches the bit size of the pointee type, + // we change it to 0 here. If this causes an assertion trip, the pointee type + // needs to be resolved before calling this ptr() function. 
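+    // (e.g. an `elem_type` of `u32` inside a 4-byte host integer: 4 * 8 == 32
+    // bits, so the host_size adds no information and is canonicalized to 0.)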
+ if (info.host_size != 0) { + const elem_bit_size = info.elem_type.toType().bitSize(mod); + assert(info.bit_offset + elem_bit_size <= info.host_size * 8); + if (info.host_size * 8 == elem_bit_size) { + canon_info.host_size = 0; + } + } + + return (try intern(mod, .{ .ptr_type = canon_info })).toType(); } pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { diff --git a/src/TypedValue.zig b/src/TypedValue.zig index dd0ae66a6390..8770917a0123 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -33,7 +33,7 @@ pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue { } pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool { - if (a.ty.ip_index != b.ty.ip_index) return false; + if (a.ty.toIntern() != b.ty.toIntern()) return false; return a.val.eql(b.val, a.ty, mod); } @@ -76,11 +76,7 @@ pub fn print( ) (@TypeOf(writer).Error || Allocator.Error)!void { var val = tv.val; var ty = tv.ty; - if (val.isVariable(mod)) - return writer.writeAll("(variable)"); - while (true) switch (val.ip_index) { - .empty_struct => return printAggregate(ty, val, writer, level, mod), .none => switch (val.tag()) { .aggregate => return printAggregate(ty, val, writer, level, mod), .@"union" => { @@ -91,7 +87,7 @@ pub fn print( try writer.writeAll(".{ "); try print(.{ - .ty = mod.unionPtr(mod.intern_pool.indexToKey(ty.ip_index).union_type.index).tag_ty, + .ty = mod.unionPtr(mod.intern_pool.indexToKey(ty.toIntern()).union_type.index).tag_ty, .val = union_val.tag, }, writer, level - 1, mod); try writer.writeAll(" = "); @@ -176,47 +172,112 @@ pub fn print( .opt_payload => { val = val.castTag(.opt_payload).?.data; ty = ty.optionalChild(mod); - return print(.{ .ty = ty, .val = val }, writer, level, mod); }, }, - else => { - const key = mod.intern_pool.indexToKey(val.ip_index); - if (key.typeOf() == .type_type) { - return Type.print(val.toType(), writer, mod); - } - switch (key) { - .int => |int| switch (int.storage) { - inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), - .lazy_align => |lazy_ty| return writer.print("{d}", .{ - lazy_ty.toType().abiAlignment(mod), - }), - .lazy_size => |lazy_ty| return writer.print("{d}", .{ - lazy_ty.toType().abiSize(mod), - }), + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => return Type.print(val.toType(), writer, mod), + .undef => return writer.writeAll("undefined"), + .runtime_value => return writer.writeAll("(runtime value)"), + .simple_value => |simple_value| switch (simple_value) { + .empty_struct => return printAggregate(ty, val, writer, level, mod), + .generic_poison => return writer.writeAll("(generic poison)"), + else => return writer.writeAll(@tagName(simple_value)), + }, + .variable => return writer.writeAll("(variable)"), + .extern_func => |extern_func| return writer.print("(extern function '{s}')", .{ + mod.declPtr(extern_func.decl).name, + }), + .func => |func| return writer.print("(function '{s}')", .{ + mod.declPtr(mod.funcPtr(func.index).owner_decl).name, + }), + .int => |int| switch (int.storage) { + inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), + .lazy_align => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiAlignment(mod), + }), + .lazy_size => |lazy_ty| return 
writer.print("{d}", .{ + lazy_ty.toType().abiSize(mod), + }), + }, + .err => |err| return writer.print("error.{s}", .{ + mod.intern_pool.stringToSlice(err.name), + }), + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return writer.print("error.{s}", .{ + mod.intern_pool.stringToSlice(err_name), + }), + .payload => |payload| { + val = payload.toValue(); + ty = ty.errorUnionPayload(mod); }, - .enum_tag => |enum_tag| { - if (level == 0) { - return writer.writeAll("(enum)"); - } + }, + .enum_literal => |enum_literal| return writer.print(".{s}", .{ + mod.intern_pool.stringToSlice(enum_literal), + }), + .enum_tag => |enum_tag| { + if (level == 0) { + return writer.writeAll("(enum)"); + } - try writer.writeAll("@intToEnum("); + try writer.writeAll("@intToEnum("); + try print(.{ + .ty = Type.type, + .val = enum_tag.ty.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(", "); + try print(.{ + .ty = mod.intern_pool.typeOf(enum_tag.int).toType(), + .val = enum_tag.int.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(")"); + return; + }, + .float => |float| switch (float.storage) { + inline else => |x| return writer.print("{}", .{x}), + }, + .ptr => return writer.writeAll("(ptr)"), + .opt => |opt| switch (opt.val) { + .none => return writer.writeAll("null"), + else => { + val = opt.val.toValue(); + ty = ty.optionalChild(mod); + }, + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}), + .elems, .repeated_elem => return printAggregate(ty, val, writer, level, mod), + }, + .un => |un| { + try writer.writeAll(".{ "); + if (level > 0) { try print(.{ - .ty = Type.type, - .val = enum_tag.ty.toValue(), + .ty = ty.unionTagTypeHypothetical(mod), + .val = un.tag.toValue(), }, writer, level - 1, mod); - try writer.writeAll(", "); + try writer.writeAll(" = "); try print(.{ - .ty = mod.intern_pool.typeOf(enum_tag.int).toType(), - .val = enum_tag.int.toValue(), + .ty = ty.unionFieldType(un.tag.toValue(), mod), + .val = un.val.toValue(), }, writer, level - 1, mod); - try writer.writeAll(")"); - return; - }, - .float => |float| switch (float.storage) { - inline else => |x| return writer.print("{}", .{x}), - }, - else => return writer.print("{}", .{val.ip_index}), - } + } else try writer.writeAll("..."); + return writer.writeAll(" }"); + }, }, }; } @@ -238,12 +299,9 @@ fn printAggregate( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - switch (ty.ip_index) { - .none => {}, // TODO make this unreachable after finishing InternPool migration - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type, .anon_struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), - else => {}, - }, + switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type, .anon_struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), + else => {}, } try print(.{ .ty = ty.structFieldType(i, mod), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d4b7e8eb3d06..d8cf13ce6fd5 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3255,9 +3255,9 @@ pub const DeclGen = struct { try dg.module.markDeclAlive(fn_decl); return dg.resolveLlvmFunction(fn_decl_index); }, - .int => |int| { + .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = int.storage.toBigInt(&bigint_space); + const bigint = tv.val.toBigInt(&bigint_space, mod); return lowerBigInt(dg, tv.ty, bigint); }, 
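            // Note that going through Value.toBigInt with the Module also lets
            // lazy ints (lazy_align/lazy_size) resolve to concrete values here.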
.err => |err| { From 63dc0447fc4654324ef8efcfa65849f7ef682531 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 13:21:35 -0700 Subject: [PATCH 106/205] wasm: fix error union constant lowering --- src/arch/wasm/CodeGen.zig | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 0c771974179a..2715af08f250 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3183,15 +3183,26 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const kv = try mod.getErrorValue(name); return WValue{ .imm32 = kv.value }; }, - .error_union => { - const error_type = ty.errorUnionSet(mod); + .error_union => |error_union| { + const err_tv: TypedValue = switch (error_union.val) { + .err_name => |err_name| .{ + .ty = ty.errorUnionSet(mod), + .val = (try mod.intern(.{ .err = .{ + .ty = ty.errorUnionSet(mod).toIntern(), + .name = err_name, + } })).toValue(), + }, + .payload => .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, + }; const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const is_pl = val.errorUnionIsPayload(mod); - const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); - return func.lowerConstant(err_val, error_type); + return func.lowerConstant(err_tv.val, err_tv.ty); } + return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, .enum_tag => |enum_tag| { From ace5a5e3ccfce69c93426913f247ea7bad7703ff Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 13:21:46 -0700 Subject: [PATCH 107/205] llvm: simplify control flow lowering structs --- src/codegen/llvm.zig | 115 +++++++++++++++++++++---------------------- 1 file changed, 56 insertions(+), 59 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d8cf13ce6fd5..b84a8c8c0764 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3537,78 +3537,75 @@ pub const DeclGen = struct { @intCast(c_uint, llvm_elems.len), ); }, - .struct_type, .anon_struct_type => { - const llvm_struct_ty = try dg.lowerType(tv.ty); + .anon_struct_type => |tuple| { const gpa = dg.gpa; - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { - .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); - - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { - if (field_val != .none) continue; - if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + for (tuple.types, tuple.values, 0..) 
|field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(mod, i), - }); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = try tv.val.fieldValue(mod, i), + }); - llvm_fields.appendAssumeCapacity(field_llvm_val); + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } + llvm_fields.appendAssumeCapacity(field_llvm_val); - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + } + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + const llvm_struct_ty = try dg.lowerType(tv.ty); + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const llvm_struct_ty = try dg.lowerType(tv.ty); + const gpa = dg.gpa; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); From d5f0ee0d62e48e623625779a0cf722c3f0f66921 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 13:21:59 -0700 Subject: [PATCH 108/205] codegen: fix lowering of constant structs --- src/codegen.zig | 83 ++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 68 insertions(+), 15 deletions(-) diff --git a/src/codegen.zig b/src/codegen.zig index 6dbf3f847a21..28fd59a66a25 100644 --- 
a/src/codegen.zig +++ b/src/codegen.zig @@ -441,46 +441,99 @@ pub fn generateSymbol( })) orelse return error.Overflow; if (padding > 0) try code.writer().writeByteNTimes(0, padding); }, - .struct_type, .anon_struct_type => { - if (typed_value.ty.containerLayout(mod) == .Packed) { - const struct_obj = mod.typeToStruct(typed_value.ty).?; + .anon_struct_type => |tuple| { + const struct_begin = code.items.len; + for (tuple.types, 0..) |field_ty, index| { + if (!field_ty.toType().hasRuntimeBits(mod)) continue; + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty, + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }; + + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty.toType(), + .val = field_val.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + const unpadded_field_end = code.items.len - struct_begin; + + // Pad struct members if required + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); + const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse + return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + + if (struct_obj.layout == .Packed) { const fields = struct_obj.fields.values(); - const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse + return error.Overflow; const current_pos = code.items.len; try code.resize(current_pos + abi_size); var bits: u16 = 0; - for (field_vals, 0..) |field_val, index| { - const field_ty = fields[index].ty; + for (fields, 0..) |field, index| { + const field_ty = field.ty; + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty.toIntern(), + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }; + // pointer may point to a decl which must be marked used - // but can also result in a relocation. Therefore we handle those seperately. + // but can also result in a relocation. Therefore we handle those separately. if (field_ty.zigTypeTag(mod) == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; + const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse + return error.Overflow; var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); defer tmp_list.deinit(); switch (try generateSymbol(bin_file, src_loc, .{ .ty = field_ty, - .val = field_val, + .val = field_val.toValue(), }, &tmp_list, debug_output, reloc_info)) { .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), .fail => |em| return Result{ .fail = em }, } } else { - field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; + field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; } bits += @intCast(u16, field_ty.bitSize(mod)); } } else { const struct_begin = code.items.len; - const field_vals = typed_value.val.castTag(.aggregate).?.data; - for (field_vals, 0..) 
|field_val, index| { - const field_ty = typed_value.ty.structFieldType(index, mod); + for (struct_obj.fields.values(), 0..) |field, index| { + const field_ty = field.ty; if (!field_ty.hasRuntimeBits(mod)) continue; + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty.toIntern(), + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }; + switch (try generateSymbol(bin_file, src_loc, .{ .ty = field_ty, - .val = field_val, + .val = field_val.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, From 9cd0ca9f482ef7f76d3f3ca683913e9aceaa47fe Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 26 May 2023 16:04:53 -0400 Subject: [PATCH 109/205] Module: rename functions to make ownership checks explicit This makes the difference between `decl.getOwnedFunction` and `decl.val.getFunction` more clear when reading the code. --- src/Module.zig | 66 ++++++++++++++++++++------------------- src/Sema.zig | 12 +++---- src/arch/wasm/CodeGen.zig | 2 +- src/codegen/c.zig | 12 +++---- src/codegen/llvm.zig | 24 +++++++------- src/codegen/spirv.zig | 6 ++-- src/link/Coff.zig | 10 +++--- src/link/Dwarf.zig | 4 +-- src/link/Elf.zig | 8 ++--- src/link/MachO.zig | 14 ++++----- src/link/Plan9.zig | 8 ++--- src/link/SpirV.zig | 2 +- src/link/Wasm.zig | 10 +++--- 13 files changed, 90 insertions(+), 88 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index f78f53300615..e24f4e501ca2 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -613,7 +613,7 @@ pub const Decl = struct { pub fn clearValues(decl: *Decl, mod: *Module) void { const gpa = mod.gpa; - if (decl.getFunctionIndex(mod).unwrap()) |func| { + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); if (mod.funcPtr(func).comptime_args != null) { _ = mod.monomorphed_funcs.removeContext(func, .{ .mod = mod }); @@ -772,52 +772,52 @@ pub const Decl = struct { return tv.ty.zigTypeTag(mod) == .Fn; } - /// If the Decl has a value and it is a struct, return it, + /// If the Decl owns its value and it is a struct, return it, /// otherwise null. - pub fn getStruct(decl: Decl, mod: *Module) ?*Struct { - return mod.structPtrUnwrap(decl.getStructIndex(mod)); + pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct { + return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod)); } - pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { + pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; if (decl.val.ip_index == .none) return .none; return mod.intern_pool.indexToStructType(decl.val.toIntern()); } - /// If the Decl has a value and it is a union, return it, + /// If the Decl owns its value and it is a union, return it, /// otherwise null. - pub fn getUnion(decl: Decl, mod: *Module) ?*Union { + pub fn getOwnedUnion(decl: Decl, mod: *Module) ?*Union { if (!decl.owns_tv) return null; if (decl.val.ip_index == .none) return null; return mod.typeToUnion(decl.val.toType()); } - /// If the Decl has a value and it is a function, return it, + /// If the Decl owns its value and it is a function, return it, /// otherwise null. 
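    /// (Contrast `decl.val.getFunction(mod)`, which returns the function payload
    /// of the value regardless of whether this Decl owns it.)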
- pub fn getFunction(decl: Decl, mod: *Module) ?*Fn { - return mod.funcPtrUnwrap(decl.getFunctionIndex(mod)); + pub fn getOwnedFunction(decl: Decl, mod: *Module) ?*Fn { + return mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod)); } - pub fn getFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { + pub fn getOwnedFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none; } - /// If the Decl has a value and it is an extern function, returns it, + /// If the Decl owns its value and it is an extern function, returns it, /// otherwise null. - pub fn getExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc { + pub fn getOwnedExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc { return if (decl.owns_tv) decl.val.getExternFunc(mod) else null; } - /// If the Decl has a value and it is a variable, returns it, + /// If the Decl owns its value and it is a variable, returns it, /// otherwise null. - pub fn getVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable { + pub fn getOwnedVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable { return if (decl.owns_tv) decl.val.getVariable(mod) else null; } /// Gets the namespace that this Decl creates by being a struct, union, /// enum, or opaque. /// Only returns it if the Decl is the owner. - pub fn getInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex { + pub fn getOwnedInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex { if (!decl.owns_tv) return .none; return switch (decl.val.ip_index) { .empty_struct_type => .none, @@ -833,8 +833,8 @@ pub const Decl = struct { } /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. - pub fn getInnerNamespace(decl: Decl, mod: *Module) ?*Namespace { - return if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| mod.namespacePtr(i) else null; + pub fn getOwnedInnerNamespace(decl: Decl, mod: *Module) ?*Namespace { + return mod.namespacePtrUnwrap(decl.getOwnedInnerNamespaceIndex(mod)); } pub fn dump(decl: *Decl) void { @@ -3361,7 +3361,7 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { gpa.free(kv.value); } if (decl.has_tv) { - if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| { + if (decl.getOwnedInnerNamespaceIndex(mod).unwrap()) |i| { mod.namespacePtr(i).destroyDecls(mod); mod.destroyNamespace(i); } @@ -3407,6 +3407,10 @@ pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.I return mod.intern_pool.inferredErrorSetPtr(index); } +pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace { + return mod.namespacePtr(index.unwrap() orelse return null); +} + /// This one accepts an index from the InternPool and asserts that it is not /// the anonymous empty struct type. 
pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { @@ -3873,28 +3877,28 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { if (!decl.owns_tv) continue; - if (decl.getStruct(mod)) |struct_obj| { + if (decl.getOwnedStruct(mod)) |struct_obj| { struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getUnion(mod)) |union_obj| { + if (decl.getOwnedUnion(mod)) |union_obj| { union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getFunction(mod)) |func| { + if (decl.getOwnedFunction(mod)) |func| { func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getInnerNamespace(mod)) |namespace| { + if (decl.getOwnedInnerNamespace(mod)) |namespace| { for (namespace.decls.keys()) |sub_decl| { try decl_stack.append(gpa, sub_decl); } @@ -4074,7 +4078,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.deleteDeclExports(decl_index); // Similarly, `@setAlignStack` invocations will be re-discovered. - if (decl.getFunctionIndex(mod).unwrap()) |func| { + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); } @@ -4577,7 +4581,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (mod.declIsRoot(decl_index)) { log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; - const struct_index = decl.getStructIndex(mod).unwrap().?; + const struct_index = decl.getOwnedStructIndex(mod).unwrap().?; const struct_obj = mod.structPtr(struct_index); // This might not have gotten set in `semaFile` if the first time had // a ZIR failure, so we set it here in case. 
@@ -4659,7 +4663,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.has_tv) { prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); - if (decl.getFunction(mod)) |prev_func| { + if (decl.getOwnedFunction(mod)) |prev_func| { prev_is_inline = prev_func.state == .inline_only; } } @@ -5313,7 +5317,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @intCast(u32, decl_sub_index); - if (decl.getFunctionIndex(mod) != .none) { + if (decl.getOwnedFunctionIndex(mod) != .none) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state @@ -5390,7 +5394,7 @@ pub fn clearDecl( if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } - if (decl.getInnerNamespace(mod)) |namespace| { + if (decl.getOwnedInnerNamespace(mod)) |namespace| { try namespace.deleteAllDecls(mod, outdated_decls); } } @@ -5733,10 +5737,8 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { if (mod.cimport_errors.fetchSwapRemove(decl_index)) |kv| { for (kv.value) |err| err.deinit(mod.gpa); } - if (decl.has_tv and decl.owns_tv) { - if (decl.getFunctionIndex(mod).unwrap()) |func| { - _ = mod.align_stack_fns.remove(func); - } + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { + _ = mod.align_stack_fns.remove(func); } if (mod.emit_h) |emit_h| { if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| { diff --git a/src/Sema.zig b/src/Sema.zig index 51e58f2e7b78..8e09d5f378ee 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5730,7 +5730,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { try mod.ensureDeclAnalyzed(decl_index); const exported_decl = mod.declPtr(decl_index); - if (exported_decl.getFunction(mod)) |function| { + if (exported_decl.val.getFunction(mod)) |function| { return sema.analyzeExport(block, src, options, function.owner_decl); } } @@ -6206,7 +6206,7 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { .extern_func => |extern_func| extern_func.decl, .func => |func| mod.funcPtr(func.index).owner_decl, .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| decl, + .decl => |decl| mod.declPtr(decl).val.getFunction(mod).?.owner_decl, else => return null, }, else => return null, @@ -6782,7 +6782,7 @@ fn analyzeCall( }), .func => |function| function.index, .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| mod.declPtr(decl).getFunctionIndex(mod).unwrap().?, + .decl => |decl| mod.declPtr(decl).val.getFunctionIndex(mod).unwrap().?, else => { assert(callee_ty.isPtrAtRuntime(mod)); return sema.fail(block, call_src, "{s} call of function pointer", .{ @@ -7403,7 +7403,7 @@ fn instantiateGenericCall( const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .func => |function| function.index, - .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?, + .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?, else => unreachable, }); // Check the Module's generic function map with an adapted context, so that we @@ -28336,7 +28336,7 @@ fn beginComptimePtrLoad( const is_mutable = ptr.addr == .mut_decl; const decl = mod.declPtr(decl_index); const decl_tv 
= try decl.typedValue(); - if (decl.getVariable(mod) != null) return error.RuntimeLoad; + if (decl.val.getVariable(mod) != null) return error.RuntimeLoad; const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ @@ -29423,7 +29423,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const ptr_ty = try mod.ptrType(.{ .elem_type = decl_tv.ty.toIntern(), .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), - .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false, + .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else false, .address_space = decl.@"addrspace", }); if (analyze_fn_body) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2715af08f250..91743e0d643f 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2210,7 +2210,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif try func.bin_file.addOrUpdateImport( mem.sliceTo(ext_decl.name, 0), atom.getSymbolIndex().?, - mod.intern_pool.stringToSliceUnwrap(ext_decl.getExternFunc(mod).?.lib_name), + mod.intern_pool.stringToSliceUnwrap(ext_decl.getOwnedExternFunc(mod).?.lib_name), type_index, ); break :blk extern_func.decl; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 59d00f5849f0..ab69514ee146 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -549,12 +549,12 @@ pub const DeclGen = struct { } // Chase function values in order to be able to reference the original function. - if (decl.getFunction(mod)) |func| if (func.owner_decl != decl_index) + if (decl.val.getFunction(mod)) |func| if (func.owner_decl != decl_index) return dg.renderDeclValue(writer, ty, val, func.owner_decl, location); - if (decl.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index) + if (decl.val.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index) return dg.renderDeclValue(writer, ty, val, extern_func.decl, location); - if (decl.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable); + if (decl.val.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable); // We shouldn't cast C function pointers as this is UB (when you call // them). 
The analysis until now should ensure that the C function @@ -1580,7 +1580,7 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); + if (fn_decl.val.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( @@ -2740,13 +2740,13 @@ pub fn genDecl(o: *Object) !void { const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; - if (decl.getExternFunc(mod)) |_| { + if (tv.val.getExternFunc(mod)) |_| { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 }); try fwd_decl_writer.writeAll(";\n"); try genExports(o); - } else if (decl.getVariable(mod)) |variable| { + } else if (tv.val.getVariable(mod)) |variable| { try o.dg.renderFwdDecl(decl_c_value.decl, variable); try genExports(o); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b84a8c8c0764..1d3749f6a3d6 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1165,7 +1165,7 @@ pub const Object = struct { di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; - const is_internal_linkage = decl.getExternFunc(mod) == null and + const is_internal_linkage = decl.val.getExternFunc(mod) == null and !mod.decl_exports.contains(decl_index); const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type) llvm.DIFlags.NoReturn @@ -1274,7 +1274,7 @@ pub const Object = struct { var free_decl_name = false; const decl_name = decl_name: { if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { - if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { free_decl_name = true; break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ decl.name, lib_name }); @@ -1306,7 +1306,7 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { @@ -1348,7 +1348,7 @@ pub const Object = struct { defer gpa.free(section_z); llvm_global.setSection(section_z); } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } @@ -1382,7 +1382,7 @@ pub const Object = struct { llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); llvm_global.setUnnamedAddr(.True); - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { const single_threaded = mod.comp.bin_file.options.single_threaded; if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); @@ -2452,7 +2452,7 @@ pub const DeclGen = struct { log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(), }); - if (decl.getExternFunc(mod)) |extern_func| { + if (decl.val.getExternFunc(mod)) |extern_func| { _ = try dg.resolveLlvmFunction(extern_func.decl); } else { const target 
= mod.getTarget(); @@ -2460,7 +2460,7 @@ pub const DeclGen = struct { global.setAlignment(decl.getAlignment(mod)); if (decl.@"linksection") |section| global.setSection(section); assert(decl.has_tv); - const init_val = if (decl.getVariable(mod)) |variable| init_val: { + const init_val = if (decl.val.getVariable(mod)) |variable| init_val: { break :init_val variable.init.toValue(); } else init_val: { global.setGlobalConstant(.True); @@ -2555,7 +2555,7 @@ pub const DeclGen = struct { } else { if (target.isWasm()) { dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); - if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); } @@ -2716,7 +2716,7 @@ pub const DeclGen = struct { llvm_global.setValueName(decl.name); llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { const single_threaded = mod.comp.bin_file.options.single_threaded; if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); @@ -3993,11 +3993,11 @@ pub const DeclGen = struct { // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. const decl = mod.declPtr(decl_index); - if (decl.getFunction(mod)) |func| { + if (decl.val.getFunction(mod)) |func| { if (func.owner_decl != decl_index) { return self.lowerDeclRefValue(tv, func.owner_decl); } - } else if (decl.getExternFunc(mod)) |func| { + } else if (decl.val.getExternFunc(mod)) |func| { if (func.decl != decl_index) { return self.lowerDeclRefValue(tv, func.decl); } @@ -7939,7 +7939,7 @@ pub const FuncGen = struct { } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const func = self.dg.decl.getFunction(mod).?; + const func = self.dg.decl.getOwnedFunction(mod).?; const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 80e98dbcd3c5..64a0a7ec57bc 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -261,7 +261,7 @@ pub const DeclGen = struct { const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { // TODO: Extern fn? 
- const kind: SpvModule.DeclKind = if (decl.getFunctionIndex(self.module) != .none) + const kind: SpvModule.DeclKind = if (decl.val.getFunctionIndex(self.module) != .none) .func else .global; @@ -1544,7 +1544,7 @@ pub const DeclGen = struct { const decl_id = self.spv.declPtr(spv_decl_index).result_id; log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); - if (decl.getFunction(mod)) |_| { + if (decl.val.getFunction(mod)) |_| { assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ @@ -1597,7 +1597,7 @@ pub const DeclGen = struct { try self.generateTestEntryPoint(fqn, spv_decl_index); } } else { - const init_val = if (decl.getVariable(mod)) |payload| + const init_val = if (decl.val.getVariable(mod)) |payload| payload.init.toValue() else decl.val; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f4ee2fde976c..8b76e8dd69a7 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1156,10 +1156,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.getExternFunc(mod)) |_| { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -1172,7 +1172,7 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, @@ -1313,7 +1313,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (decl.getVariable(mod)) |_| { + if (val.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rdata_section_index.?; @@ -1425,7 +1425,7 @@ pub fn updateDeclExports( // detect the default subsystem. for (exports) |exp| { const exported_decl = mod.declPtr(exp.exported_decl); - if (exported_decl.getFunctionIndex(mod) == .none) continue; + if (exported_decl.getOwnedFunctionIndex(mod) == .none) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index d6dd6979eaab..9d8076f59299 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -971,7 +971,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) // For functions we need to add a prologue to the debug line program. try dbg_line_buffer.ensureTotalCapacity(26); - const func = decl.getFunction(mod).?; + const func = decl.val.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, @@ -1523,7 +1523,7 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl. 
if (atom.len == 0) return; const decl = mod.declPtr(decl_index); - const func = decl.getFunction(mod).?; + const func = decl.val.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 476b939038d6..e4fa07620db1 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2465,7 +2465,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (decl.getVariable(mod)) |_| { + if (val.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rodata_section_index.?; @@ -2647,10 +2647,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.getExternFunc(mod)) |_| { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -2667,7 +2667,7 @@ pub fn updateDecl( defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables - const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index ffbdcdb91f02..f7f975f920f3 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1984,16 +1984,16 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo const decl = mod.declPtr(decl_index); - if (decl.getExternFunc(mod)) |_| { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
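// (A sketch of the pattern recurring across these linker updates, using the
// names exactly as they appear in the surrounding hunks: extern functions and
// extern variables return early, and codegen is handed the variable's
// initializer rather than the Decl's own value:
//
//     const decl_val = if (decl.val.getVariable(mod)) |variable|
//         variable.init.toValue()
//     else
//         decl.val;
//
// The migration itself is mechanical: these queries move from the Decl to
// `decl.val`, so they answer for whatever value the Decl holds, while the new
// `getOwned*` variants keep the old owner-only behavior where that matters.)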
} } - const is_threadlocal = if (decl.getVariable(mod)) |variable| + const is_threadlocal = if (decl.val.getVariable(mod)) |variable| variable.is_threadlocal and !self.base.options.single_threaded else false; @@ -2012,7 +2012,7 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo null; defer if (decl_state) |*ds| ds.deinit(); - const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2177,7 +2177,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const decl = module.declPtr(decl_index); const decl_metadata = self.decls.get(decl_index).?; - const decl_val = decl.getVariable(mod).?.init.toValue(); + const decl_val = decl.val.getVariable(mod).?.init.toValue(); const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2278,7 +2278,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { } } - if (decl.getVariable(mod)) |variable| { + if (val.getVariable(mod)) |variable| { if (variable.is_threadlocal and !single_threaded) { break :blk self.thread_data_section_index.?; } @@ -2289,7 +2289,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (decl.getVariable(mod)) |_| { + if (val.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.data_const_section_index.?; diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 2071833b9356..0803b6beef2f 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -392,10 +392,10 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { const decl = mod.declPtr(decl_index); - if (decl.getExternFunc(mod)) |_| { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -407,7 +407,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -771,7 +771,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // in the deleteUnusedDecl function. 
const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const is_fn = decl.getFunctionIndex(mod) != .none; + const is_fn = decl.val.getFunctionIndex(mod) != .none; if (is_fn) { var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 0a6608303e09..89d6be1ec8da 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -138,7 +138,7 @@ pub fn updateDeclExports( exports: []const *Module.Export, ) !void { const decl = mod.declPtr(decl_index); - if (decl.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { + if (decl.val.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 78d1be978b20..96de121ffb4f 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1404,9 +1404,9 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi defer tracy.end(); const decl = mod.declPtr(decl_index); - if (decl.getFunction(mod)) |_| { + if (decl.val.getFunction(mod)) |_| { return; - } else if (decl.getExternFunc(mod)) |_| { + } else if (decl.val.getExternFunc(mod)) |_| { return; } @@ -1415,12 +1415,12 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi atom.clear(); if (decl.isExtern(mod)) { - const variable = decl.getVariable(mod).?; + const variable = decl.getOwnedVariable(mod).?; const name = mem.sliceTo(decl.name, 0); const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name); return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null); } - const val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; var code_writer = std.ArrayList(u8).init(wasm.base.allocator); defer code_writer.deinit(); @@ -3373,7 +3373,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod const atom = wasm.getAtomPtr(atom_index); if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); - } else if (decl.getVariable(mod)) |variable| { + } else if (decl.getOwnedVariable(mod)) |variable| { if (variable.is_const) { try wasm.parseAtom(atom_index, .{ .data = .read_only }); } else if (variable.init.toValue().isUndefDeep(mod)) { From 1dc01f11401b6ec0be1e7685cdc445d1b10d4f19 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 26 May 2023 18:56:31 -0400 Subject: [PATCH 110/205] InternPool: fix build-exe and compiler-rt crashes --- src/InternPool.zig | 84 +++++++++++++++++- src/Module.zig | 4 - src/Sema.zig | 168 ++++++++++++++++++++---------------- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen/c.zig | 2 +- src/type.zig | 18 ++-- src/value.zig | 5 ++ 7 files changed, 191 insertions(+), 92 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index ad47b4c84eb5..ea3bafaf4838 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2817,6 +2817,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .ptr_type => |ptr_type| { assert(ptr_type.elem_type != .none); + assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.elem_type); if (ptr_type.size == .Slice) { _ = ip.map.pop(); @@ -4780,7 +4781,88 @@ pub fn stringToSliceUnwrap(ip: InternPool, 
s: OptionalNullTerminatedString) ?[:0 } pub fn typeOf(ip: InternPool, index: Index) Index { - return ip.indexToKey(index).typeOf(); + // This optimization of static keys is required so that typeOf can be called + // on static keys that haven't been added yet during static key initialization. + // An alternative would be to topological sort the static keys, but this would + // mean that the range of type indices would not be dense. + return switch (index) { + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + .prefetch_options_type, + .export_options_type, + .extern_options_type, + .type_info_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + .anyerror_void_error_union_type, + .generic_poison_type, + .empty_struct_type, + => .type_type, + .undef => .undefined_type, + .zero, .one, .negative_one => .comptime_int_type, + .zero_usize, .one_usize => .usize_type, + .zero_u8, .one_u8, .four_u8 => .u8_type, + .calling_convention_c, .calling_convention_inline => .calling_convention_type, + .void_value => .void_type, + .unreachable_value => .noreturn_type, + .null_value => .null_type, + .bool_true, .bool_false => .bool_type, + .empty_struct => .empty_struct_type, + .generic_poison => .generic_poison_type, + .var_args_param_type, .none => unreachable, + _ => ip.indexToKey(index).typeOf(), + }; } /// Assumes that the enum's field indexes equal its value tags. 
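The `typeOf` fast path above is an instance of a general Zig idiom: a
non-exhaustive enum can name its static tags, switch over them exhaustively,
and route runtime-created values through the `_` prong. A minimal,
self-contained sketch of the idea (hypothetical `Index` enum, not the
compiler's real one):

    const std = @import("std");

    const Index = enum(u32) {
        // Static keys, usable before any dynamic interning happens.
        type_type,
        bool_type,
        bool_true,
        bool_false,
        // Dynamically interned items occupy the unnamed tag range.
        _,

        fn typeOf(index: Index) Index {
            return switch (index) {
                // Answered without any table lookup, which is what lets the
                // real typeOf run during static key initialization.
                .type_type, .bool_type => .type_type,
                .bool_true, .bool_false => .bool_type,
                // Everything else would take the slow, key-based path.
                _ => @panic("dynamic lookup elided in this sketch"),
            };
        }
    };

    test "static keys resolve without a lookup" {
        try std.testing.expectEqual(Index.bool_type, (Index.bool_true).typeOf());
    }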
diff --git a/src/Module.zig b/src/Module.zig index e24f4e501ca2..5b4d5c71af8e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6898,10 +6898,6 @@ pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.E } pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { - if (std.debug.runtime_safety) { - const tag = ty.zigTypeTag(mod); - assert(tag == .Int or tag == .ComptimeInt); - } if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); var limbs_buffer: [4]usize = undefined; diff --git a/src/Sema.zig b/src/Sema.zig index 8e09d5f378ee..c3512985113e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -848,13 +848,12 @@ pub fn analyzeBodyBreak( block: *Block, body: []const Zir.Inst.Index, ) CompileError!?BreakData { - const mod = sema.mod; const break_inst = sema.analyzeBodyInner(block, body) catch |err| switch (err) { error.ComptimeBreak => sema.comptime_break_inst, else => |e| return e, }; if (block.instructions.items.len != 0 and - sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn(mod)) + sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1]))) return null; const break_data = sema.code.instructions.items(.data)[break_inst].@"break"; const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data; @@ -9671,9 +9670,9 @@ fn intCast( // range to account for negative values. const dest_range_val = if (wanted_info.signedness == .signed) range_val: { const one = try mod.intValue(unsigned_operand_ty, 1); - const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, sema.mod); + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod); break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty); - } else dest_max_val; + } else try mod.getCoerced(dest_max_val, unsigned_operand_ty); const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); const ok = if (is_vector) ok: { @@ -10791,7 +10790,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError check_range: { if (operand_ty.zigTypeTag(mod) == .Int) { - const min_int = try operand_ty.minInt(mod); + const min_int = try operand_ty.minInt(mod, operand_ty); const max_int = try operand_ty.maxInt(mod, operand_ty); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { @@ -11647,7 +11646,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; - const min = try ty.minInt(mod); + const min = try ty.minInt(mod, ty); const max = try ty.maxInt(mod, ty); return RangeSetUnhandledIterator{ @@ -12452,7 +12451,7 @@ fn zirShr( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try mod.intValue(scalar_ty, bit_count); + const bit_count_val = try mod.intValue(rhs_ty.scalarType(mod), bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val)); @@ -13297,7 +13296,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try 
mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13437,7 +13436,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13520,7 +13519,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13608,7 +13607,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13725,7 +13724,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13805,7 +13804,7 @@ fn addDivIntOverflowSafety( return; } - const min_int = try resolved_type.minInt(mod); + const min_int = try resolved_type.minInt(mod, resolved_type); const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); const neg_one = try sema.splat(resolved_type, neg_one_scalar); @@ -13881,7 +13880,7 @@ fn addDivByZeroSafety( const scalar_zero = if (is_int) try mod.intValue(resolved_type.scalarType(mod), 0) else - try mod.floatValue(resolved_type.scalarType(mod), 0); + try mod.floatValue(resolved_type.scalarType(mod), 0.0); const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); @@ -13967,7 +13966,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -14575,7 +14574,8 @@ fn analyzeArithmetic( const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); + const scalar_type = resolved_type.scalarType(mod); + const scalar_tag = scalar_type.zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; @@ -14797,8 +14797,13 @@ fn analyzeArithmetic( // the result is nan. // If either of the operands are nan, the result is nan. const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + else => unreachable, + }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { @@ -14823,7 +14828,7 @@ fn analyzeArithmetic( const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } @@ -14854,7 +14859,7 @@ fn analyzeArithmetic( const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14887,8 +14892,8 @@ fn analyzeArithmetic( // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { @@ -14931,8 +14936,8 @@ fn analyzeArithmetic( // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
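// A rough sketch of the x * 1 fold after this change, with `splat` and
// `compareAll` shaped exactly as in the hunks above (illustrative only):
//
//     const one = try sema.splat(resolved_type, scalar_one);
//     if (try sema.compareAll(lhs_val, .eq, one, resolved_type))
//         return casted_rhs; // x * 1 => x, element-wise for vectors
//
// Building the constant at `scalar_type` and splatting it is what makes the
// fold well-formed for vector operands; the old
// `mod.intValue(resolved_type, 1)` asked for an integer value of a possibly
// vector type, which the (now removed) safety assert in `Module.intValue`
// rejected.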
const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { @@ -18817,7 +18822,10 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); } - return sema.addConstant(opt_ptr_stack_trace_ty, Value.null); + return sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_ptr_stack_trace_ty.toIntern(), + .val = .none, + } })).toValue()); } fn zirFrame( @@ -20103,8 +20111,8 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (block.wantSafety()) { const back = try block.addTyOp(.int_to_float, operand_ty, result); const diff = try block.addBinOp(.sub, operand, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 1))); - const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, -1))); + const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.floatValue(operand_ty, 1.0))); + const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.floatValue(operand_ty, -1.0))); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } @@ -22448,8 +22456,8 @@ fn analyzeMinMax( // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(mod), - .max => try comptime_elem_ty.minInt(mod), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(mod, unrefined_elem_ty), + .max => try comptime_elem_ty.minInt(mod, comptime_elem_ty), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { @@ -25996,7 +26004,7 @@ fn coerceExtra( if (dest_info.sentinel) |dest_sent| { if (array_ty.sentinel(mod)) |inst_sent| { - if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) { + if (!dest_sent.eql(inst_sent, dst_elem_type, mod)) { in_memory_result = .{ .ptr_sentinel = .{ .actual = inst_sent, .wanted = dest_sent, @@ -26115,7 +26123,7 @@ fn coerceExtra( if (inst_info.size == .Slice) { assert(dest_info.sentinel == null); if (inst_info.sentinel == null or - !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, sema.mod)) + !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -26164,7 +26172,7 @@ fn coerceExtra( block, inst_src, "array literal requires address-of operator (&) to coerce to slice type '{}'", - .{dest_ty.fmt(sema.mod)}, + .{dest_ty.fmt(mod)}, ); } @@ -26190,7 +26198,7 @@ fn coerceExtra( // pointer to tuple to slice if (dest_info.mutable) { const err_msg = err_msg: { - const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", 
.{dest_ty.fmt(sema.mod)}); + const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(mod)}); errdefer err_msg.deinit(sema.gpa); try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{}); break :err_msg err_msg; @@ -26218,7 +26226,7 @@ fn coerceExtra( } if (dest_info.sentinel == null or inst_info.sentinel == null or - !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod)) + !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -26244,7 +26252,7 @@ fn coerceExtra( block, inst_src, "fractional component prevents float value '{}' from coercion to type '{}'", - .{ val.fmtValue(inst_ty, sema.mod), dest_ty.fmt(sema.mod) }, + .{ val.fmtValue(inst_ty, mod), dest_ty.fmt(mod) }, ); } const result_val = try sema.floatToInt(block, inst_src, val, inst_ty, dest_ty); @@ -26258,7 +26266,7 @@ fn coerceExtra( // comptime-known integer to other number if (!(try sema.intFitsInType(val, dest_ty, null))) { if (!opts.report_err) return error.NotCoercible; - return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }); + return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) }); } return try sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty)); } @@ -26296,12 +26304,12 @@ fn coerceExtra( } if (try sema.resolveMaybeUndefVal(inst)) |val| { const result_val = try val.floatCast(dest_ty, mod); - if (!val.eql(result_val, inst_ty, sema.mod)) { + if (!val.eql(try result_val.floatCast(inst_ty, mod), inst_ty, mod)) { return sema.fail( block, inst_src, "type '{}' cannot represent float value '{}'", - .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }, + .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) }, ); } return try sema.addConstant(dest_ty, result_val); @@ -26329,7 +26337,7 @@ fn coerceExtra( } break :int; }; - const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, sema.mod, sema); + const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, mod, sema); // TODO implement this compile error //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, mod)) { @@ -26337,7 +26345,7 @@ fn coerceExtra( // block, // inst_src, // "type '{}' cannot represent integer value '{}'", - // .{ dest_ty.fmt(sema.mod), val }, + // .{ dest_ty.fmt(mod), val }, // ); //} return try sema.addConstant(dest_ty, result_val); @@ -26359,7 +26367,7 @@ fn coerceExtra( block, inst_src, "no field named '{s}' in enum '{}'", - .{ bytes, dest_ty.fmt(sema.mod) }, + .{ bytes, dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -26375,7 +26383,7 @@ fn coerceExtra( .Union => blk: { // union to its own tag type const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk; - if (union_tag_ty.eql(dest_ty, sema.mod)) { + if (union_tag_ty.eql(dest_ty, mod)) { return sema.unionToTag(block, dest_ty, inst, inst_src); } }, @@ -26498,15 +26506,15 @@ fn coerceExtra( errdefer msg.destroy(sema.gpa); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", 
.{}); + const src_decl = mod.declPtr(sema.func.?.owner_decl); + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const msg = msg: { - const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), inst_ty.fmt(mod) }); errdefer msg.destroy(sema.gpa); // E!T to T @@ -26528,18 +26536,18 @@ fn coerceExtra( try in_memory_result.report(sema, block, inst_src, msg); // Add notes about function return type - if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) { + if (opts.is_ret and mod.test_functions.get(sema.func.?.owner_decl) == null) { const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); + const src_decl = mod.declPtr(sema.func.?.owner_decl); if (inst_ty.isError(mod) and !dest_ty.isError(mod)) { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{}); + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{}); } else { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{}); + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{}); } } if (try opts.param_src.get(sema)) |param_src| { - try sema.mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{}); + try mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{}); } // TODO maybe add "cannot store an error in type '{}'" note @@ -26679,7 +26687,7 @@ const InMemoryCoercionResult = union(enum) { }, .error_union_payload => |pair| { try sema.errNote(block, src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26692,18 +26700,18 @@ const InMemoryCoercionResult = union(enum) { .array_sentinel => |sentinel| { if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{ - sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod), }); } else { try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{ - sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.wanted.fmtValue(sentinel.ty, mod), }); } break; }, .array_elem => |pair| { try sema.errNote(block, src, msg, "array element type '{}' cannot cast into array element type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26715,19 +26723,19 @@ const InMemoryCoercionResult = union(enum) { }, .vector_elem => |pair| { try sema.errNote(block, src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, .optional_shape => |pair| { try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - 
pair.actual.optionalChild(mod).fmt(sema.mod), pair.wanted.optionalChild(mod).fmt(sema.mod), + pair.actual.optionalChild(mod).fmt(mod), pair.wanted.optionalChild(mod).fmt(mod), }); break; }, .optional_child => |pair| { try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26792,7 +26800,7 @@ const InMemoryCoercionResult = union(enum) { }, .fn_param => |param| { try sema.errNote(block, src, msg, "parameter {d} '{}' cannot cast into '{}'", .{ - param.index, param.actual.fmt(sema.mod), param.wanted.fmt(sema.mod), + param.index, param.actual.fmt(mod), param.wanted.fmt(mod), }); cur = param.child; }, @@ -26802,13 +26810,13 @@ const InMemoryCoercionResult = union(enum) { }, .fn_return_type => |pair| { try sema.errNote(block, src, msg, "return type '{}' cannot cast into return type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, .ptr_child => |pair| { try sema.errNote(block, src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26819,11 +26827,11 @@ const InMemoryCoercionResult = union(enum) { .ptr_sentinel => |sentinel| { if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{ - sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod), }); } else { try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{ - sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.wanted.fmtValue(sentinel.ty, mod), }); } break; @@ -26847,11 +26855,11 @@ const InMemoryCoercionResult = union(enum) { const actual_allow_zero = pair.actual.ptrAllowsZero(mod); if (actual_allow_zero and !wanted_allow_zero) { try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); } else { try sema.errNote(block, src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); } break; @@ -26877,13 +26885,13 @@ const InMemoryCoercionResult = union(enum) { }, .double_ptr_to_anyopaque => |pair| { try sema.errNote(block, src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); break; }, .slice_to_anyopaque => |pair| { try sema.errNote(block, src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); try sema.errNote(block, src, msg, "consider using '.ptr'", .{}); break; @@ -27616,25 +27624,24 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const mod = sema.mod; const array_ty = sema.typeOf(ptr).childType(mod); if (array_ty.zigTypeTag(mod) != .Array) return null; - var ptr_inst = Air.refToIndex(ptr) orelse return null; + 
var ptr_ref = ptr; + var ptr_inst = Air.refToIndex(ptr_ref) orelse return null; const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); - const prev_ptr = while (air_tags[ptr_inst] == .bitcast) { - const prev_ptr = air_datas[ptr_inst].ty_op.operand; - const prev_ptr_ty = sema.typeOf(prev_ptr); - if (prev_ptr_ty.zigTypeTag(mod) != .Pointer) return null; - const prev_ptr_child_ty = prev_ptr_ty.childType(mod); - if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr; - ptr_inst = Air.refToIndex(prev_ptr) orelse return null; + const vector_ty = while (air_tags[ptr_inst] == .bitcast) { + ptr_ref = air_datas[ptr_inst].ty_op.operand; + if (!sema.isKnownZigType(ptr_ref, .Pointer)) return null; + const child_ty = sema.typeOf(ptr_ref).childType(mod); + if (child_ty.zigTypeTag(mod) == .Vector) break child_ty; + ptr_inst = Air.refToIndex(ptr_ref) orelse return null; } else return null; // We have a pointer-to-array and a pointer-to-vector. If the elements and // lengths match, return the result. - const vector_ty = sema.typeOf(prev_ptr).childType(mod); if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and array_ty.arrayLen(mod) == vector_ty.vectorLen(mod)) { - return prev_ptr; + return ptr_ref; } else { return null; } @@ -34474,3 +34481,12 @@ fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { }; return sema.typeOf(ref).isNoReturn(sema.mod); } + +/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain type. +fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool { + if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { + .inferred_alloc, .inferred_alloc_comptime => return false, + else => {}, + }; + return sema.typeOf(ref).zigTypeTag(sema.mod) == tag; +} diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 00f5b3f3daae..fca1b25a1d86 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4895,7 +4895,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { }); const sign_val = switch (tag) { - .neg => try vec_ty.minInt(mod), + .neg => try vec_ty.minInt(mod, vec_ty), .fabs => try vec_ty.maxInt(mod, vec_ty), else => unreachable, }; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index ab69514ee146..0c5e6e6c4884 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6723,7 +6723,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { }, .Max => switch (scalar_ty.zigTypeTag(mod)) { .Bool => try mod.intValue(scalar_ty, 0), - .Int => try scalar_ty.minInt(mod), + .Int => try scalar_ty.minInt(mod, scalar_ty), .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, diff --git a/src/type.zig b/src/type.zig index 5d6f77adf2e6..cb455d5ebe55 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2865,23 +2865,23 @@ pub const Type = struct { } // Works for vectors and vectors of integers. - pub fn minInt(ty: Type, mod: *Module) !Value { - const scalar = try minIntScalar(ty.scalarType(mod), mod); + pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value { + const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty); return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), + .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = scalar.toIntern() }, } })).toValue() else scalar; } /// Asserts that the type is an integer. 
- pub fn minIntScalar(ty: Type, mod: *Module) !Value { + pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { const info = ty.intInfo(mod); - if (info.signedness == .unsigned) return mod.intValue(ty, 0); - if (info.bits == 0) return mod.intValue(ty, -1); + if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0); + if (info.bits == 0) return mod.intValue(dest_ty, -1); if (std.math.cast(u6, info.bits - 1)) |shift| { const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); - return mod.intValue(Type.comptime_int, n); + return mod.intValue(dest_ty, n); } var res = try std.math.big.int.Managed.init(mod.gpa); @@ -2889,7 +2889,7 @@ pub const Type = struct { try res.setTwosCompIntLimit(.min, info.signedness, info.bits); - return mod.intValue_big(Type.comptime_int, res.toConst()); + return mod.intValue_big(dest_ty, res.toConst()); } // Works for vectors and vectors of integers. @@ -2897,7 +2897,7 @@ pub const Type = struct { pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty); return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), + .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = scalar.toIntern() }, } })).toValue() else scalar; } diff --git a/src/value.zig b/src/value.zig index 473b1c967cc7..23b90f40df6c 100644 --- a/src/value.zig +++ b/src/value.zig @@ -3166,6 +3166,11 @@ pub const Value = struct { .len = undefined, }; result_bigint.shiftLeft(lhs_bigint, shift); + if (ty.toIntern() != .comptime_int_type) { + const int_info = ty.intInfo(mod); + result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); + } + return mod.intValue_big(ty, result_bigint.toConst()); } From e156c1c07ea3db3b2cb163ec7230ed8ecbf42859 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 15:06:11 -0700 Subject: [PATCH 111/205] InternPool: correct the logic for struct size dump --- src/InternPool.zig | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index ea3bafaf4838..07e74ffe3587 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4435,6 +4435,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { const items_size = (1 + 4) * ip.items.len; const extra_size = 4 * ip.extra.items.len; const limbs_size = 8 * ip.limbs.items.len; + // TODO: fields size is not taken into account const structs_size = ip.allocated_structs.len * (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); const unions_size = ip.allocated_unions.len * @@ -4501,7 +4502,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), - .type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + .type_struct => b: { + const struct_index = @intToEnum(Module.Struct.Index, data); + const struct_obj = ip.structPtrConst(struct_index); + break :b @sizeOf(Module.Struct) + + @sizeOf(Module.Namespace) + + @sizeOf(Module.Decl) + + (struct_obj.fields.count() * @sizeOf(Module.Struct.Field)); + }, .type_struct_ns => @sizeOf(Module.Namespace), .type_struct_anon => b: { const info = ip.extraData(TypeStructAnon, data); From 270f9e24ddb53334e2a4e5d7a7292a98441854b7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 16:40:40 -0700 Subject: [PATCH 112/205] AstGen: 
generate tests with anyerror!void rather than inferred error sets. This avoids extra unnecessary work for the compiler since inferred error sets are unique types. --- src/AstGen.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index 6956a58ae4ed..17cf2aae64c8 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -4497,7 +4497,7 @@ fn testDecl( .cc_gz = null, .align_ref = .none, .align_gz = null, - .ret_ref = .void_type, + .ret_ref = .anyerror_void_error_union_type, .ret_gz = null, .section_ref = .none, .section_gz = null, @@ -4510,7 +4510,7 @@ fn testDecl( .body_gz = &fn_block, .lib_name = 0, .is_var_args = false, - .is_inferred_error = true, + .is_inferred_error = false, .is_test = true, .is_extern = false, .is_noinline = false, From 8011faa0049df757bab78310af824b283220bcac Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 16:41:17 -0700 Subject: [PATCH 113/205] compiler: fix populateTestFunctions InternPool usage --- src/Module.zig | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 5b4d5c71af8e..1605bffdd90c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6454,8 +6454,8 @@ pub fn populateTestFunctions( for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); + const test_decl_name = mem.span(test_decl.name); const test_name_decl_index = n: { - const test_decl_name = mem.span(test_decl.name); const test_name_decl_ty = try mod.arrayType(.{ .len = test_decl_name.len, .child = .u8_type, @@ -6478,10 +6478,17 @@ pub fn populateTestFunctions( try mod.intern(.{ .ptr = .{ .ty = .slice_const_u8_type, .addr = .{ .decl = test_name_decl_index }, + .len = try mod.intern(.{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = test_decl_name.len }, + } }), } }), // func try mod.intern(.{ .ptr = .{ - .ty = test_decl.ty.toIntern(), + .ty = try mod.intern(.{ .ptr_type = .{ + .elem_type = test_decl.ty.toIntern(), + .is_const = true, + } }), .addr = .{ .decl = test_decl_index }, } }), // async_frame_size From a596ea683c35d90c6af3e1fbeae2e06312ce392f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 17:44:23 -0700 Subject: [PATCH 114/205] CLI: introduce --verbose-intern-pool and stop dumping to stderr without the user's consent. --- src/Compilation.zig | 15 +++++++++++---- src/main.zig | 5 +++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 30ac49995597..0ee916c44681 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -87,6 +87,7 @@ clang_preprocessor_mode: ClangPreprocessorMode, /// Whether to print clang argvs to stdout. 
verbose_cc: bool, verbose_air: bool, +verbose_intern_pool: bool, verbose_llvm_ir: ?[]const u8, verbose_llvm_bc: ?[]const u8, verbose_cimport: bool, @@ -593,6 +594,7 @@ pub const InitOptions = struct { verbose_cc: bool = false, verbose_link: bool = false, verbose_air: bool = false, + verbose_intern_pool: bool = false, verbose_llvm_ir: ?[]const u8 = null, verbose_llvm_bc: ?[]const u8 = null, verbose_cimport: bool = false, @@ -1574,6 +1576,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .clang_preprocessor_mode = options.clang_preprocessor_mode, .verbose_cc = options.verbose_cc, .verbose_air = options.verbose_air, + .verbose_intern_pool = options.verbose_intern_pool, .verbose_llvm_ir = options.verbose_llvm_ir, .verbose_llvm_bc = options.verbose_llvm_bc, .verbose_cimport = options.verbose_cimport, @@ -2026,10 +2029,12 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void try comp.performAllTheWork(main_progress_node); if (comp.bin_file.options.module) |module| { - std.debug.print("intern pool stats for '{s}':\n", .{ - comp.bin_file.options.root_name, - }); - module.intern_pool.dump(); + if (builtin.mode == .Debug and comp.verbose_intern_pool) { + std.debug.print("intern pool stats for '{s}':\n", .{ + comp.bin_file.options.root_name, + }); + module.intern_pool.dump(); + } if (comp.bin_file.options.is_test and comp.totalErrorCount() == 0) { // The `test_functions` decl has been intentionally postponed until now, @@ -5422,6 +5427,7 @@ fn buildOutputFromZig( .verbose_cc = comp.verbose_cc, .verbose_link = comp.bin_file.options.verbose_link, .verbose_air = comp.verbose_air, + .verbose_intern_pool = comp.verbose_intern_pool, .verbose_llvm_ir = comp.verbose_llvm_ir, .verbose_llvm_bc = comp.verbose_llvm_bc, .verbose_cimport = comp.verbose_cimport, @@ -5500,6 +5506,7 @@ pub fn build_crt_file( .verbose_cc = comp.verbose_cc, .verbose_link = comp.bin_file.options.verbose_link, .verbose_air = comp.verbose_air, + .verbose_intern_pool = comp.verbose_intern_pool, .verbose_llvm_ir = comp.verbose_llvm_ir, .verbose_llvm_bc = comp.verbose_llvm_bc, .verbose_cimport = comp.verbose_cimport, diff --git a/src/main.zig b/src/main.zig index 4acd305ce7df..5d666840c0ab 100644 --- a/src/main.zig +++ b/src/main.zig @@ -569,6 +569,7 @@ const usage_build_generic = \\ --verbose-link Display linker invocations \\ --verbose-cc Display C compiler invocations \\ --verbose-air Enable compiler debug output for Zig AIR + \\ --verbose-intern-pool Enable compiler debug output for InternPool \\ --verbose-llvm-ir[=path] Enable compiler debug output for unoptimized LLVM IR \\ --verbose-llvm-bc=[path] Enable compiler debug output for unoptimized LLVM BC \\ --verbose-cimport Enable compiler debug output for C imports @@ -735,6 +736,7 @@ fn buildOutputType( var verbose_link = (builtin.os.tag != .wasi or builtin.link_libc) and std.process.hasEnvVarConstant("ZIG_VERBOSE_LINK"); var verbose_cc = (builtin.os.tag != .wasi or builtin.link_libc) and std.process.hasEnvVarConstant("ZIG_VERBOSE_CC"); var verbose_air = false; + var verbose_intern_pool = false; var verbose_llvm_ir: ?[]const u8 = null; var verbose_llvm_bc: ?[]const u8 = null; var verbose_cimport = false; @@ -1460,6 +1462,8 @@ fn buildOutputType( verbose_cc = true; } else if (mem.eql(u8, arg, "--verbose-air")) { verbose_air = true; + } else if (mem.eql(u8, arg, "--verbose-intern-pool")) { + verbose_intern_pool = true; } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) { verbose_llvm_ir = "-"; } else if (mem.startsWith(u8, arg, 
"--verbose-llvm-ir=")) { @@ -3156,6 +3160,7 @@ fn buildOutputType( .verbose_cc = verbose_cc, .verbose_link = verbose_link, .verbose_air = verbose_air, + .verbose_intern_pool = verbose_intern_pool, .verbose_llvm_ir = verbose_llvm_ir, .verbose_llvm_bc = verbose_llvm_bc, .verbose_cimport = verbose_cimport, From fc358435cb5cbcc21967af438b190d4e18bba9ae Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 20:21:17 -0700 Subject: [PATCH 115/205] C backend: InternPool fixes --- src/codegen.zig | 3 +- src/codegen/c.zig | 295 +++++++++++++++++++++++++++------------------- 2 files changed, 174 insertions(+), 124 deletions(-) diff --git a/src/codegen.zig b/src/codegen.zig index 28fd59a66a25..1ae6d6ce0626 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -443,7 +443,8 @@ pub fn generateSymbol( }, .anon_struct_type => |tuple| { const struct_begin = code.items.len; - for (tuple.types, 0..) |field_ty, index| { + for (tuple.types, tuple.values, 0..) |field_ty, comptime_val, index| { + if (comptime_val != .none) continue; if (!field_ty.toType().hasRuntimeBits(mod)) continue; const field_val = switch (aggregate.storage) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 0c5e6e6c4884..a8e207765272 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1110,14 +1110,16 @@ pub const DeclGen = struct { .undef, .runtime_value => unreachable, // handled above .simple_value => |simple_value| switch (simple_value) { - .undefined, - .void, - .null, - .empty_struct, - .@"unreachable", - .generic_poison, - => unreachable, // non-runtime values - .false, .true => try writer.writeAll(@tagName(simple_value)), + // non-runtime values + .undefined => unreachable, + .void => unreachable, + .null => unreachable, + .empty_struct => unreachable, + .@"unreachable" => unreachable, + .generic_poison => unreachable, + + .false => try writer.writeAll("false"), + .true => try writer.writeAll("true"), }, .variable, .extern_func, @@ -1138,10 +1140,10 @@ pub const DeclGen = struct { .error_union => |error_union| { const payload_ty = ty.errorUnionPayload(mod); const error_ty = ty.errorUnionSet(mod); - const error_val = if (val.errorUnionIsPayload(mod)) try mod.intValue(Type.anyerror, 0) else val; + const error_val = if (val.errorUnionIsPayload(mod)) try mod.intValue(Type.err_int, 0) else val; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return dg.renderValue(writer, error_ty, error_val, location); + return dg.renderValue(writer, Type.err_int, error_val, location); } if (!location.isInitializer()) { @@ -1305,12 +1307,10 @@ pub const DeclGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return dg.renderValue(writer, Type.bool, is_null_val, location); - if (ty.optionalReprIsPayload(mod)) { - return dg.renderValue(writer, payload_ty, switch (opt.val) { - .none => try mod.intValue(payload_ty, 0), - else => opt.val.toValue(), - }, location); - } + if (ty.optionalReprIsPayload(mod)) switch (opt.val) { + .none => return writer.writeByte('0'), + else => return dg.renderValue(writer, payload_ty, opt.val.toValue(), location), + }; if (!location.isInitializer()) { try writer.writeByte('('); @@ -1327,7 +1327,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); try writer.writeAll(" }"); }, - .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) { .array_type, .vector_type => { if (location == .FunctionArgument) { try writer.writeByte('('); @@ -1385,131 
+1385,179 @@ pub const DeclGen = struct { try writer.writeByte('}'); } }, - .struct_type, .anon_struct_type => switch (ty.containerLayout(mod)) { - .Auto, .Extern => { - const field_vals = val.castTag(.aggregate).?.data; - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - try writer.writeByte('{'); - var empty = true; - for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i, mod)) continue; - const field_ty = ty.structFieldType(field_i, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - if (!empty) try writer.writeByte(','); - try dg.renderValue(writer, field_ty, field_val, initializer_type); + .anon_struct_type => |tuple| { + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } - empty = false; - } - try writer.writeByte('}'); - }, - .Packed => { - const field_vals = val.castTag(.aggregate).?.data; - const int_info = ty.intInfo(mod); + try writer.writeByte('{'); + var empty = true; + for (tuple.types, tuple.values, 0..) |field_ty, comptime_ty, field_i| { + if (comptime_ty != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - const bits = Type.smallestUnsignedBits(int_info.bits - 1); - const bit_offset_ty = try mod.intType(.unsigned, bits); + if (!empty) try writer.writeByte(','); - var bit_offset: u64 = 0; + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty, + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), initializer_type); - var eff_num_fields: usize = 0; - for (0..field_vals.len) |field_i| { - if (ty.structFieldIsComptime(field_i, mod)) continue; - const field_ty = ty.structFieldType(field_i, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + empty = false; + } + try writer.writeByte('}'); + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + switch (struct_obj.layout) { + .Auto, .Extern => { + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } - eff_num_fields += 1; - } + try writer.writeByte('{'); + var empty = true; + for (struct_obj.fields.values(), 0..) 
|field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeByte(','); + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + try dg.renderValue(writer, field.ty, field_val.toValue(), initializer_type); - if (eff_num_fields == 0) { - try writer.writeByte('('); - try dg.renderValue(writer, ty, Value.undef, initializer_type); - try writer.writeByte(')'); - } else if (ty.bitSize(mod) > 64) { - // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) - var num_or = eff_num_fields - 1; - while (num_or > 0) : (num_or -= 1) { - try writer.writeAll("zig_or_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); + empty = false; } + try writer.writeByte('}'); + }, + .Packed => { + const int_info = ty.intInfo(mod); - var eff_index: usize = 0; - var needs_closing_paren = false; - for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i, mod)) continue; - const field_ty = ty.structFieldType(field_i, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); - const cast_context = IntCastContext{ .value = .{ .value = field_val } }; - if (bit_offset != 0) { - try writer.writeAll("zig_shl_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); - try writer.writeAll(", "); - const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - try writer.writeByte(')'); - } else { - try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); - } + var bit_offset: u64 = 0; + var eff_num_fields: usize = 0; - if (needs_closing_paren) try writer.writeByte(')'); - if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - bit_offset += field_ty.bitSize(mod); - needs_closing_paren = true; - eff_index += 1; + eff_num_fields += 1; } - } else { - try writer.writeByte('('); - // a << a_off | b << b_off | c << c_off - var empty = true; - for (field_vals, 0..) 
|field_val, field_i| { - if (ty.structFieldIsComptime(field_i, mod)) continue; - const field_ty = ty.structFieldType(field_i, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - if (!empty) try writer.writeAll(" | "); + if (eff_num_fields == 0) { try writer.writeByte('('); - try dg.renderType(writer, ty); + try dg.renderValue(writer, ty, Value.undef, initializer_type); try writer.writeByte(')'); + } else if (ty.bitSize(mod) > 64) { + // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) + var num_or = eff_num_fields - 1; + while (num_or > 0) : (num_or -= 1) { + try writer.writeAll("zig_or_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + } - if (bit_offset != 0) { - try dg.renderValue(writer, field_ty, field_val, .Other); - try writer.writeAll(" << "); - const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - } else { - try dg.renderValue(writer, field_ty, field_val, .Other); + var eff_index: usize = 0; + var needs_closing_paren = false; + for (struct_obj.fields.values(), 0..) |field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } }; + if (bit_offset != 0) { + try writer.writeAll("zig_shl_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument); + try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } else { + try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument); + } + + if (needs_closing_paren) try writer.writeByte(')'); + if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + + bit_offset += field.ty.bitSize(mod); + needs_closing_paren = true; + eff_index += 1; } + } else { + try writer.writeByte('('); + // a << a_off | b << b_off | c << c_off + var empty = true; + for (struct_obj.fields.values(), 0..) 
|field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - bit_offset += field_ty.bitSize(mod); - empty = false; + if (!empty) try writer.writeAll(" | "); + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + + if (bit_offset != 0) { + try dg.renderValue(writer, field.ty, field_val.toValue(), .Other); + try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + } else { + try dg.renderValue(writer, field.ty, field_val.toValue(), .Other); + } + + bit_offset += field.ty.bitSize(mod); + empty = false; + } + try writer.writeByte(')'); } - try writer.writeByte(')'); - } - }, + }, + } }, else => unreachable, }, - .un => { - const union_obj = val.castTag(.@"union").?.data; - + .un => |un| { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); } - const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?; + const field_i = ty.unionTagFieldIndex(un.tag.toValue(), mod).?; const field_ty = ty.unionFields(mod).values()[field_i].ty; const field_name = ty.unionFields(mod).keys()[field_i]; if (ty.containerLayout(mod) == .Packed) { @@ -1523,7 +1571,7 @@ pub const DeclGen = struct { try dg.renderType(writer, ty); try writer.writeByte(')'); } - try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); + try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type); } else { try writer.writeAll("0"); } @@ -1535,7 +1583,7 @@ pub const DeclGen = struct { const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); - try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); + try dg.renderValue(writer, tag_ty, un.tag.toValue(), initializer_type); } if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); @@ -1543,7 +1591,7 @@ pub const DeclGen = struct { } if (field_ty.hasRuntimeBits(mod)) { try writer.print(" .{ } = ", .{fmtIdent(field_name)}); - try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); + try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type); try writer.writeByte(' '); } else for (ty.unionFields(mod).values()) |field| { if (!field.ty.hasRuntimeBits(mod)) continue; @@ -5113,13 +5161,14 @@ fn airIsNull( TypedValue{ .ty = Type.bool, .val = Value.true } else if (optional_ty.isPtrLikeOptional(mod)) // operand is a regular pointer, test `operand !=/== NULL` - TypedValue{ .ty = optional_ty, .val = Value.null } + TypedValue{ .ty = optional_ty, .val = try mod.nullValue(optional_ty) } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) - TypedValue{ .ty = payload_ty, .val = try mod.intValue(payload_ty, 0) } + TypedValue{ .ty = Type.err_int, .val = try mod.intValue(Type.err_int, 0) } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(mod); - break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; + const opt_slice_ptr_ty = try mod.optionalType(slice_ptr_ty.toIntern()); 
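Most of the churn in the renderValue hunks above is one recurring pattern: an interned aggregate no longer hands the backend a slice of `Value`s, so each field must be unwrapped from the three-way storage union, which holds raw bytes, one interned index per element, or a single repeated element. The pattern in isolation (names as in the hunks; a restatement of the patch, not new API):

    const field_val = switch (aggregate.storage) {
        // byte storage: re-intern the raw byte as an int of the field type
        .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{
            .ty = field_ty.toIntern(),
            .storage = .{ .u64 = bytes[field_i] },
        } }),
        // one interned value index per element
        .elems => |elems| elems[field_i],
        // the same element repeated for every index
        .repeated_elem => |elem| elem,
    };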
+ break :rhs TypedValue{ .ty = opt_slice_ptr_ty, .val = try mod.nullValue(opt_slice_ptr_ty) }; } else rhs: { try writer.writeAll(".is_null"); break :rhs TypedValue{ .ty = Type.bool, .val = Value.true }; @@ -5781,7 +5830,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, local, .{ .identifier = "error" }); try a.assign(f, writer); - try f.object.dg.renderValue(writer, err_ty, try mod.intValue(err_ty, 0), .Other); + try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other); try a.end(f, writer); } return local; @@ -5812,11 +5861,11 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const else try f.writeCValue(writer, operand, .Other) else - try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); + try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other); try writer.writeByte(' '); try writer.writeAll(operator); try writer.writeByte(' '); - try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); + try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other); try writer.writeAll(";\n"); return local; } From c8b0d4d149c891ed83db57fe6986d10c5dd654af Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 21:14:18 -0700 Subject: [PATCH 116/205] InternPool: optimize zigTypeTag() This is a particularly hot function, so we operate directly on encodings rather than the more straightforward implementation of calling `indexToKey`. I measured this as 1.05 ± 0.04 times faster than the previous commit with a ReleaseFast build against hello world (which includes std.debug and formatted printing). I also profiled the function and found that zigTypeTag() went from being a major caller of `indexToKey` to being completely insignificant due to being so fast. --- src/InternPool.zig | 201 +++++++++++++++++++++++++++++++++++++++++++++ src/type.zig | 87 +------------------- 2 files changed, 202 insertions(+), 86 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 07e74ffe3587..fdc0a9af3ae9 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4899,3 +4899,204 @@ pub fn isNoReturn(ip: InternPool, ty: Index) bool { }, }; } + +/// This is a particularly hot function, so we operate directly on encodings +/// rather than the more straightforward implementation of calling `indexToKey`.
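The speed of the function that follows comes from two-level dispatch on the raw encoding: indices with static, well-known values resolve inside the switch itself, and only dynamically allocated indices cost a single load from the items tag array, so no `Key` is ever materialized. A self-contained analogue of the shape (an illustration, not the compiler's code):

    const Index = enum(u32) { bool_type, u8_type, _ };

    fn tagOf(index: Index, item_tags: []const u8) u8 {
        return switch (index) {
            .bool_type, .u8_type => 0, // named encodings: no memory access
            _ => item_tags[@enumToInt(index)], // one tag load, no Key decode
        };
    }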
+pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { + return switch (index) { + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => .Int, + + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + => .Float, + + .anyopaque_type => .Opaque, + .bool_type => .Bool, + .void_type => .Void, + .type_type => .Type, + .anyerror_type => .ErrorSet, + .comptime_int_type => .ComptimeInt, + .comptime_float_type => .ComptimeFloat, + .noreturn_type => .NoReturn, + .anyframe_type => .AnyFrame, + .null_type => .Null, + .undefined_type => .Undefined, + .enum_literal_type => .EnumLiteral, + + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + => .Enum, + + .prefetch_options_type, + .export_options_type, + .extern_options_type, + => .Struct, + + .type_info_type => .Union, + + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + => .Pointer, + + .anyerror_void_error_union_type => .ErrorUnion, + .empty_struct_type => .Struct, + + .generic_poison_type => return error.GenericPoison, + + // values, not types + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .zero_u8 => unreachable, + .one => unreachable, + .one_usize => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, + .negative_one => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + .var_args_param_type => unreachable, // special tag + + _ => switch (ip.items.items(.tag)[@enumToInt(index)]) { + .type_int_signed, + .type_int_unsigned, + => .Int, + + .type_array_big, + .type_array_small, + => .Array, + + .type_vector => .Vector, + + .type_pointer, + .type_slice, + => .Pointer, + + .type_optional => .Optional, + .type_anyframe => .AnyFrame, + .type_error_union => .ErrorUnion, + + .type_error_set, + .type_inferred_error_set, + => .ErrorSet, + + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + => .Enum, + + .simple_type => unreachable, // handled via Index tag above + + .type_opaque => .Opaque, + + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + => .Struct, + + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => .Union, + + .type_function => .Fn, + + // values, not types + .undef, + .runtime_value, + .simple_value, + .ptr_mut_decl, + .ptr_decl, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_comptime_field, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .opt_null, + .int_u8, + .int_u16, + .int_u32, + .int_i32, + .int_usize, + .int_comptime_int_u32, + .int_comptime_int_i32, + .int_small, + .int_positive, + .int_negative, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .error_union_payload, + .enum_literal, + 
.enum_tag, + .float_f16, + .float_f32, + .float_f64, + .float_f80, + .float_f128, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + .float_comptime_float, + .variable, + .extern_func, + .func, + .only_possible_value, + .union_value, + .bytes, + .aggregate, + .repeated, + => unreachable, + }, + .none => unreachable, // special tag + }; +} diff --git a/src/type.zig b/src/type.zig index cb455d5ebe55..a9ad8b94fd64 100644 --- a/src/type.zig +++ b/src/type.zig @@ -23,92 +23,7 @@ pub const Type = struct { } pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .int_type => .Int, - .ptr_type => .Pointer, - .array_type => .Array, - .vector_type => .Vector, - .opt_type => .Optional, - .error_union_type => .ErrorUnion, - .error_set_type, .inferred_error_set_type => .ErrorSet, - .struct_type, .anon_struct_type => .Struct, - .union_type => .Union, - .opaque_type => .Opaque, - .enum_type => .Enum, - .func_type => .Fn, - .anyframe_type => .AnyFrame, - .simple_type => |s| switch (s) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - => .Float, - - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - => .Int, - - .anyopaque => .Opaque, - .bool => .Bool, - .void => .Void, - .type => .Type, - .anyerror => .ErrorSet, - .comptime_int => .ComptimeInt, - .comptime_float => .ComptimeFloat, - .noreturn => .NoReturn, - .null => .Null, - .undefined => .Undefined, - .enum_literal => .EnumLiteral, - - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - => .Enum, - - .prefetch_options, - .export_options, - .extern_options, - => .Struct, - - .type_info => .Union, - - .generic_poison => return error.GenericPoison, - }, - - // values, not types - .undef, - .runtime_value, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .float, - .ptr, - .opt, - .aggregate, - .un, - => unreachable, - }; + return mod.intern_pool.zigTypeTagOrPoison(ty.toIntern()); } pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { From 2d5bc0146941f4cc207c4fd23058e25a16fd40a7 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 26 May 2023 21:22:34 -0400 Subject: [PATCH 117/205] behavior: get more test cases passing with llvm --- src/InternPool.zig | 409 +++++++++++++-------- src/Module.zig | 83 ++--- src/RangeSet.zig | 58 +-- src/Sema.zig | 671 +++++++++++++++++----------------- src/codegen.zig | 2 +- src/codegen/llvm.zig | 34 +- src/codegen/spirv.zig | 50 +-- src/type.zig | 35 +- src/value.zig | 200 +++------- tools/lldb_pretty_printers.py | 6 +- 10 files changed, 749 insertions(+), 799 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index fdc0a9af3ae9..7af91529c19c 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -621,8 +621,7 @@ pub const Key = union(enum) { pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void { const KeyTag = @typeInfo(Key).Union.tag_type.?; - const key_tag: KeyTag = key; - std.hash.autoHash(hasher, key_tag); + std.hash.autoHash(hasher, @as(KeyTag, key)); switch (key) { inline .int_type, .ptr_type, @@ -710,39 +709,58 @@ pub const Key = union(enum) { .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); - switch (ip.indexToKey(aggregate.ty)) { - .array_type => |array_type| if (array_type.child == 
.u8_type) { - switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), - .elems => |elems| { - var buffer: Key.Int.Storage.BigIntSpace = undefined; - for (elems) |elem| std.hash.autoHash( + const len = ip.aggregateTypeLen(aggregate.ty); + const child = switch (ip.indexToKey(aggregate.ty)) { + .array_type => |array_type| array_type.child, + .vector_type => |vector_type| vector_type.child, + .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + + if (child == .u8_type) { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| { + std.hash.autoHash(hasher, KeyTag.int); + std.hash.autoHash(hasher, byte); + }, + .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| { + const elem_key = ip.indexToKey(elem); + std.hash.autoHash(hasher, @as(KeyTag, elem_key)); + switch (elem_key) { + .undef => {}, + .int => |int| std.hash.autoHash( hasher, - ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable, - ); - }, - .repeated_elem => |elem| { - const len = ip.aggregateTypeLen(aggregate.ty); - var buffer: Key.Int.Storage.BigIntSpace = undefined; - const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable; - var i: u64 = 0; - while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); - }, - } - return; - }, - else => {}, + @intCast(u8, int.storage.u64), + ), + else => unreachable, + } + }, + .repeated_elem => |elem| { + const elem_key = ip.indexToKey(elem); + var remaining = len; + while (remaining > 0) : (remaining -= 1) { + std.hash.autoHash(hasher, @as(KeyTag, elem_key)); + switch (elem_key) { + .undef => {}, + .int => |int| std.hash.autoHash( + hasher, + @intCast(u8, int.storage.u64), + ), + else => unreachable, + } + } + }, + } + return; } switch (aggregate.storage) { .bytes => unreachable, - .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem), + .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| + std.hash.autoHash(hasher, elem), .repeated_elem => |elem| { - const len = ip.aggregateTypeLen(aggregate.ty); - var i: u64 = 0; - while (i < len) : (i += 1) std.hash.autoHash(hasher, elem); + var remaining = len; + while (remaining > 0) : (remaining -= 1) std.hash.autoHash(hasher, elem); }, } }, @@ -960,9 +978,10 @@ pub const Key = union(enum) { const b_info = b.aggregate; if (a_info.ty != b_info.ty) return false; + const len = ip.aggregateTypeLen(a_info.ty); const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { - for (0..@intCast(usize, ip.aggregateTypeLen(a_info.ty))) |elem_index| { + for (0..@intCast(usize, len)) |elem_index| { const a_elem = switch (a_info.storage) { .bytes => |bytes| ip.getIfExists(.{ .int = .{ .ty = .u8_type, @@ -987,11 +1006,19 @@ pub const Key = union(enum) { switch (a_info.storage) { .bytes => |a_bytes| { const b_bytes = b_info.storage.bytes; - return std.mem.eql(u8, a_bytes, b_bytes); + return std.mem.eql( + u8, + a_bytes[0..@intCast(usize, len)], + b_bytes[0..@intCast(usize, len)], + ); }, .elems => |a_elems| { const b_elems = b_info.storage.elems; - return std.mem.eql(Index, a_elems, b_elems); + return std.mem.eql( + Index, + a_elems[0..@intCast(usize, len)], + b_elems[0..@intCast(usize, len)], + ); }, .repeated_elem => |a_elem| { const b_elem = b_info.storage.repeated_elem; @@ -2691,7 +2718,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .bytes => { const extra = ip.extraData(Bytes, 
data); - const len = @intCast(u32, ip.aggregateTypeLen(extra.ty)); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.ty)); return .{ .aggregate = .{ .ty = extra.ty, .storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] }, @@ -2699,7 +2726,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .aggregate => { const extra = ip.extraDataTrail(Aggregate, data); - const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty)); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty)); const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]); return .{ .aggregate = .{ .ty = extra.data.ty, @@ -3145,7 +3172,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), }), .int => |int| { - assert(int != .none); + assert(ip.typeOf(int) == .usize_type); ip.items.appendAssumeCapacity(.{ .tag = .ptr_int, .data = try ip.addExtra(gpa, PtrAddr{ @@ -3452,7 +3479,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .enum_tag => |enum_tag| { assert(ip.isEnumType(enum_tag.ty)); - assert(ip.indexToKey(enum_tag.int) == .int); + switch (ip.indexToKey(enum_tag.ty)) { + .simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))), + .enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty), + else => unreachable, + } ip.items.appendAssumeCapacity(.{ .tag = .enum_tag, .data = try ip.addExtra(gpa, enum_tag), @@ -3501,21 +3532,43 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .aggregate => |aggregate| { const ty_key = ip.indexToKey(aggregate.ty); - const aggregate_len = ip.aggregateTypeLen(aggregate.ty); + const len = ip.aggregateTypeLen(aggregate.ty); + const child = switch (ty_key) { + .array_type => |array_type| array_type.child, + .vector_type => |vector_type| vector_type.child, + .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + const sentinel = switch (ty_key) { + .array_type => |array_type| array_type.sentinel, + .vector_type, .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + const len_including_sentinel = len + @boolToInt(sentinel != .none); switch (aggregate.storage) { .bytes => |bytes| { - assert(ty_key.array_type.child == .u8_type); - assert(bytes.len == aggregate_len); + assert(child == .u8_type); + if (bytes.len != len) { + assert(bytes.len == len_including_sentinel); + assert(bytes[len] == ip.indexToKey(sentinel).int.storage.u64); + unreachable; + } }, .elems => |elems| { - assert(elems.len == aggregate_len); + if (elems.len != len) { + assert(elems.len == len_including_sentinel); + assert(elems[len] == sentinel); + unreachable; + } + }, + .repeated_elem => |elem| { + assert(sentinel == .none or elem == sentinel); }, - .repeated_elem => {}, } switch (ty_key) { - inline .array_type, .vector_type => |seq_type| { + .array_type, .vector_type => { for (aggregate.storage.values()) |elem| { - assert(ip.typeOf(elem) == seq_type.child); + assert(ip.typeOf(elem) == child); } }, .struct_type => |struct_type| { @@ -3534,7 +3587,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { else => unreachable, } - if (aggregate_len == 0) { + if (len == 0) { ip.items.appendAssumeCapacity(.{ .tag = .only_possible_value, .data = @enumToInt(aggregate.ty), @@ -3543,41 +3596,43 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } switch (ty_key) { - .anon_struct_type => |anon_struct_type| { - if (switch 
(aggregate.storage) { + .anon_struct_type => |anon_struct_type| opv: { + switch (aggregate.storage) { .bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| { if (value != ip.getIfExists(.{ .int = .{ .ty = .u8_type, .storage = .{ .u64 = byte }, - } })) break false; - } else true, - .elems => |elems| std.mem.eql(Index, anon_struct_type.values, elems), + } })) break :opv; + }, + .elems => |elems| if (!std.mem.eql( + Index, + anon_struct_type.values, + elems, + )) break :opv, .repeated_elem => |elem| for (anon_struct_type.values) |value| { - if (value != elem) break false; - } else true, - }) { - // This encoding works thanks to the fact that, as we just verified, - // the type itself contains a slice of values that can be provided - // in the aggregate fields. - ip.items.appendAssumeCapacity(.{ - .tag = .only_possible_value, - .data = @enumToInt(aggregate.ty), - }); - return @intToEnum(Index, ip.items.len - 1); + if (value != elem) break :opv; + }, } + // This encoding works thanks to the fact that, as we just verified, + // the type itself contains a slice of values that can be provided + // in the aggregate fields. + ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(aggregate.ty), + }); + return @intToEnum(Index, ip.items.len - 1); }, else => {}, } - if (switch (aggregate.storage) { - .bytes => |bytes| for (bytes[1..]) |byte| { - if (byte != bytes[0]) break false; - } else true, - .elems => |elems| for (elems[1..]) |elem| { - if (elem != elems[0]) break false; - } else true, - .repeated_elem => true, - }) { + repeated: { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes[1..@intCast(usize, len)]) |byte| + if (byte != bytes[0]) break :repeated, + .elems => |elems| for (elems[1..@intCast(usize, len)]) |elem| + if (elem != elems[0]) break :repeated, + .repeated_elem => {}, + } const elem = switch (aggregate.storage) { .bytes => |bytes| elem: { _ = ip.map.pop(); @@ -3607,42 +3662,48 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } - switch (ty_key) { - .array_type => |array_type| if (array_type.child == .u8_type) { - const len_including_sentinel = aggregate_len + @boolToInt(array_type.sentinel != .none); - try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); - var buffer: Key.Int.Storage.BigIntSpace = undefined; - switch (aggregate.storage) { - .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), - .elems => |elems| for (elems) |elem| ip.string_bytes.appendAssumeCapacity( - ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable, + if (child == .u8_type) bytes: { + const string_bytes_index = ip.string_bytes.items.len; + try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); + switch (aggregate.storage) { + .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), + .elems => |elems| for (elems) |elem| switch (ip.indexToKey(elem)) { + .undef => { + ip.string_bytes.shrinkRetainingCapacity(string_bytes_index); + break :bytes; + }, + .int => |int| ip.string_bytes.appendAssumeCapacity( + @intCast(u8, int.storage.u64), ), - .repeated_elem => |elem| @memset( - ip.string_bytes.addManyAsSliceAssumeCapacity(aggregate_len), - ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable, + else => unreachable, + }, + 
.repeated_elem => |elem| switch (ip.indexToKey(elem)) { + .undef => break :bytes, + .int => |int| @memset( + ip.string_bytes.addManyAsSliceAssumeCapacity(len), + @intCast(u8, int.storage.u64), ), - } - if (array_type.sentinel != .none) ip.string_bytes.appendAssumeCapacity( - ip.indexToKey(array_type.sentinel).int.storage.toBigInt(&buffer).to(u8) catch - unreachable, - ); - const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel); - ip.items.appendAssumeCapacity(.{ - .tag = .bytes, - .data = ip.addExtraAssumeCapacity(Bytes{ - .ty = aggregate.ty, - .bytes = bytes.toString(), - }), - }); - return @intToEnum(Index, ip.items.len - 1); - }, - else => {}, + else => unreachable, + }, + } + if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( + @intCast(u8, ip.indexToKey(sentinel).int.storage.u64), + ); + const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel); + ip.items.appendAssumeCapacity(.{ + .tag = .bytes, + .data = ip.addExtraAssumeCapacity(Bytes{ + .ty = aggregate.ty, + .bytes = bytes.toString(), + }), + }); + return @intToEnum(Index, ip.items.len - 1); } try ip.extra.ensureUnusedCapacity( gpa, - @typeInfo(Aggregate).Struct.fields.len + aggregate_len, + @typeInfo(Aggregate).Struct.fields.len + len_including_sentinel, ); ip.items.appendAssumeCapacity(.{ .tag = .aggregate, @@ -3651,6 +3712,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), }); ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems)); + if (sentinel != .none) ip.extra.appendAssumeCapacity(@enumToInt(sentinel)); }, .un => |un| { @@ -4183,10 +4245,12 @@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// Given an existing value, returns the same value but with the supplied type. /// Only some combinations are allowed: /// * identity coercion +/// * undef => any /// * int <=> int /// * int <=> enum /// * enum_literal => enum /// * ptr <=> ptr +/// * int => ptr /// * null_value => opt /// * payload => opt /// * error set <=> error set @@ -4194,68 +4258,93 @@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// * error set => error union /// * payload => error union /// * fn <=> fn +/// * array <=> array +/// * array <=> vector +/// * vector <=> vector pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; - switch (ip.indexToKey(val)) { - .extern_func => |extern_func| if (ip.isFunctionType(new_ty)) - return ip.get(gpa, .{ .extern_func = .{ - .ty = new_ty, - .decl = extern_func.decl, - .lib_name = extern_func.lib_name, - } }), - .func => |func| if (ip.isFunctionType(new_ty)) - return ip.get(gpa, .{ .func = .{ - .ty = new_ty, - .index = func.index, - } }), - .int => |int| if (ip.isIntegerType(new_ty)) - return getCoercedInts(ip, gpa, int, new_ty) - else if (ip.isEnumType(new_ty)) - return ip.get(gpa, .{ .enum_tag = .{ + switch (val) { + .undef => return ip.get(gpa, .{ .undef = new_ty }), + .null_value => if (ip.isOptionalType(new_ty)) + return ip.get(gpa, .{ .opt = .{ .ty = new_ty, - .int = val, + .val = .none, } }), - .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) - return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), - .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { - .enum_type => |enum_type| { - const index = enum_type.nameIndex(ip, enum_literal).?; + else => switch (ip.indexToKey(val)) { + .undef => return ip.get(gpa, .{ .undef = new_ty }), + .extern_func => 
|extern_func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .extern_func = .{ + .ty = new_ty, + .decl = extern_func.decl, + .lib_name = extern_func.lib_name, + } }), + .func => |func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .func = .{ + .ty = new_ty, + .index = func.index, + } }), + .int => |int| if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, int, new_ty) + else if (ip.isEnumType(new_ty)) return ip.get(gpa, .{ .enum_tag = .{ .ty = new_ty, - .int = if (enum_type.values.len != 0) - enum_type.values[index] - else - try ip.get(gpa, .{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = index }, - } }), - } }); + .int = val, + } }) + else if (ip.isPointerType(new_ty)) + return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = val }, + } }), + .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), + .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| { + const index = enum_type.nameIndex(ip, enum_literal).?; + return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = if (enum_type.values.len != 0) + enum_type.values[index] + else + try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = index }, + } }), + } }); + }, + else => {}, }, - else => {}, - }, - .ptr => |ptr| if (ip.isPointerType(new_ty)) - return ip.get(gpa, .{ .ptr = .{ - .ty = new_ty, - .addr = ptr.addr, - .len = ptr.len, - } }), - .err => |err| if (ip.isErrorSetType(new_ty)) - return ip.get(gpa, .{ .err = .{ - .ty = new_ty, - .name = err.name, - } }) - else if (ip.isErrorUnionType(new_ty)) - return ip.get(gpa, .{ .error_union = .{ - .ty = new_ty, - .val = .{ .err_name = err.name }, - } }), - .error_union => |error_union| if (ip.isErrorUnionType(new_ty)) - return ip.get(gpa, .{ .error_union = .{ + .ptr => |ptr| if (ip.isPointerType(new_ty)) + return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = ptr.addr, + .len = ptr.len, + } }), + .err => |err| if (ip.isErrorSetType(new_ty)) + return ip.get(gpa, .{ .err = .{ + .ty = new_ty, + .name = err.name, + } }) + else if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = .{ .err_name = err.name }, + } }), + .error_union => |error_union| if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = error_union.val, + } }), + .aggregate => |aggregate| return ip.get(gpa, .{ .aggregate = .{ .ty = new_ty, - .val = error_union.val, + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[0..@intCast(usize, ip.aggregateTypeLen(new_ty))] }, + .elems => |elems| .{ .elems = elems[0..@intCast(usize, ip.aggregateTypeLen(new_ty))] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, } }), - else => {}, + else => {}, + }, } switch (ip.indexToKey(new_ty)) { .opt_type => |child_type| switch (val) { @@ -4527,7 +4616,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_function => b: { const info = ip.extraData(TypeFunction, data); - break :b @sizeOf(TypeFunction) + (@sizeOf(u32) * info.params_len); + break :b @sizeOf(TypeFunction) + (@sizeOf(Index) * info.params_len); }, .undef => 0, @@ -4570,14 +4659,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .bytes => b: { const info = ip.extraData(Bytes, data); - const len = @intCast(u32, ip.aggregateTypeLen(info.ty)); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); break :b 
@sizeOf(Bytes) + len + @boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0); }, .aggregate => b: { const info = ip.extraData(Aggregate, data); - const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty)); - break :b @sizeOf(Aggregate) + (@sizeOf(u32) * fields_len); + const fields_len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Aggregate) + (@sizeOf(Index) * fields_len); }, .repeated => @sizeOf(Repeated), @@ -4889,6 +4978,16 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { }; } +pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 { + return switch (ip.indexToKey(ty)) { + .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .array_type => |array_type| array_type.len + @boolToInt(array_type.sentinel != .none), + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }; +} + pub fn isNoReturn(ip: InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, diff --git a/src/Module.zig b/src/Module.zig index 1605bffdd90c..314e636bab52 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -99,6 +99,7 @@ monomorphed_funcs: MonomorphedFuncsSet = .{}, /// The set of all comptime function calls that have been cached so that future calls /// with the same parameters will get the same return value. memoized_calls: MemoizedCallSet = .{}, +memoized_call_args: MemoizedCall.Args = .{}, /// Contains the values from `@setAlignStack`. A sparse table is used here /// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while /// functions are many. @@ -230,46 +231,30 @@ pub const MemoizedCallSet = std.HashMapUnmanaged( ); pub const MemoizedCall = struct { - module: *Module, + args: *const Args, + + pub const Args = std.ArrayListUnmanaged(InternPool.Index); pub const Key = struct { func: Fn.Index, - args: []TypedValue, - }; + args_index: u32, + args_count: u32, - pub const Result = struct { - val: Value, - arena: std.heap.ArenaAllocator.State, + pub fn args(key: Key, ctx: MemoizedCall) []InternPool.Index { + return ctx.args.items[key.args_index..][0..key.args_count]; + } }; - pub fn eql(ctx: @This(), a: Key, b: Key) bool { - if (a.func != b.func) return false; - - assert(a.args.len == b.args.len); - for (a.args, 0..) |a_arg, arg_i| { - const b_arg = b.args[arg_i]; - if (!a_arg.eql(b_arg, ctx.module)) { - return false; - } - } + pub const Result = InternPool.Index; - return true; + pub fn eql(ctx: MemoizedCall, a: Key, b: Key) bool { + return a.func == b.func and mem.eql(InternPool.Index, a.args(ctx), b.args(ctx)); } - /// Must match `Sema.GenericCallAdapter.hash`. - pub fn hash(ctx: @This(), key: Key) u64 { + pub fn hash(ctx: MemoizedCall, key: Key) u64 { var hasher = std.hash.Wyhash.init(0); - - // The generic function Decl is guaranteed to be the first dependency - // of each of its instantiations. std.hash.autoHash(&hasher, key.func); - - // This logic must be kept in sync with the logic in `analyzeCall` that - // computes the hash. 
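PATCH 117 consistently splits what used to be one length into two for array-like aggregates: `aggregateTypeLen`, the logical element count used when hashing and comparing values, and the new `aggregateTypeLenIncludingSentinel` above, the stored count, which for arrays appends the sentinel. Because hashing and equality clamp to the logical length, a value whose storage carries a trailing sentinel still matches one whose storage does not. The relation, as a sketch:

    fn lenIncludingSentinel(len: u64, has_sentinel: bool) u64 {
        // only arrays can carry a sentinel; vectors, structs, and
        // tuples always store exactly `len` items
        return len + @boolToInt(has_sentinel);
    }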
- for (key.args) |arg| { - arg.hash(&hasher, ctx.module); - } - + std.hash.autoHashStrat(&hasher, key.args(ctx), .Deep); return hasher.final(); } }; @@ -883,6 +868,10 @@ pub const Decl = struct { return decl.ty.abiAlignment(mod); } } + + pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void { + decl.val = (try decl.val.intern(decl.ty, mod)).toValue(); + } }; /// This state is attached to every Decl when Module emit_h is non-null. @@ -3325,15 +3314,8 @@ pub fn deinit(mod: *Module) void { mod.test_functions.deinit(gpa); mod.align_stack_fns.deinit(gpa); mod.monomorphed_funcs.deinit(gpa); - - { - var it = mod.memoized_calls.iterator(); - while (it.next()) |entry| { - gpa.free(entry.key_ptr.args); - entry.value_ptr.arena.promote(gpa).deinit(); - } - mod.memoized_calls.deinit(gpa); - } + mod.memoized_call_args.deinit(gpa); + mod.memoized_calls.deinit(gpa); mod.decls_free_list.deinit(gpa); mod.allocated_decls.deinit(gpa); @@ -5894,6 +5876,7 @@ pub fn initNewAnonDecl( typed_value: TypedValue, name: [:0]u8, ) !void { + assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern())); errdefer mod.gpa.free(name); const new_decl = mod.declPtr(new_decl_index); @@ -6645,7 +6628,7 @@ pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void { if (decl.alive) return; decl.alive = true; - decl.val = (try decl.val.intern(decl.ty, mod)).toValue(); + try decl.intern(mod); // This is the first time we are marking this Decl alive. We must // therefore recurse into its value and mark any Decl it references @@ -6749,15 +6732,19 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type } } - // Canonicalize host_size. If it matches the bit size of the pointee type, - // we change it to 0 here. If this causes an assertion trip, the pointee type - // needs to be resolved before calling this ptr() function. - if (info.host_size != 0) { - const elem_bit_size = info.elem_type.toType().bitSize(mod); - assert(info.bit_offset + elem_bit_size <= info.host_size * 8); - if (info.host_size * 8 == elem_bit_size) { - canon_info.host_size = 0; - } + switch (info.vector_index) { + // Canonicalize host_size. If it matches the bit size of the pointee type, + // we change it to 0 here. If this causes an assertion trip, the pointee type + // needs to be resolved before calling this ptr() function. 
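The reworked memoization above replaces per-call allocations and arena-backed results with a key that is just a (func, args_index, args_count) window into one shared list of interned arguments; the hash/eql context dereferences the window so that identical argument sequences recorded at different offsets still match. A self-contained sketch of that adapter pattern (illustrative types, not the compiler's):

    const std = @import("std");

    const Key = struct { func: u32, args_index: u32, args_count: u32 };

    const Ctx = struct {
        args: *const std.ArrayListUnmanaged(u32),

        fn argsOf(ctx: @This(), key: Key) []const u32 {
            return ctx.args.items[key.args_index..][0..key.args_count];
        }

        pub fn eql(ctx: @This(), a: Key, b: Key) bool {
            return a.func == b.func and std.mem.eql(u32, ctx.argsOf(a), ctx.argsOf(b));
        }

        pub fn hash(ctx: @This(), key: Key) u64 {
            var hasher = std.hash.Wyhash.init(0);
            std.hash.autoHash(&hasher, key.func);
            // hash the argument values, never the window offsets, so
            // equal calls recorded at different offsets hash alike
            std.hash.autoHashStrat(&hasher, ctx.argsOf(key), .Deep);
            return hasher.final();
        }
    };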
+ .none => if (info.host_size != 0) { + const elem_bit_size = info.elem_type.toType().bitSize(mod); + assert(info.bit_offset + elem_bit_size <= info.host_size * 8); + if (info.host_size * 8 == elem_bit_size) { + canon_info.host_size = 0; + } + }, + .runtime => {}, + _ => assert(@enumToInt(info.vector_index) < info.host_size), } return (try intern(mod, .{ .ptr_type = canon_info })).toType(); diff --git a/src/RangeSet.zig b/src/RangeSet.zig index a015c7b56845..f808322fc7d3 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -1,18 +1,18 @@ const std = @import("std"); +const assert = std.debug.assert; const Order = std.math.Order; -const RangeSet = @This(); +const InternPool = @import("InternPool.zig"); const Module = @import("Module.zig"); +const RangeSet = @This(); const SwitchProngSrc = @import("Module.zig").SwitchProngSrc; -const Type = @import("type.zig").Type; -const Value = @import("value.zig").Value; ranges: std.ArrayList(Range), module: *Module, pub const Range = struct { - first: Value, - last: Value, + first: InternPool.Index, + last: InternPool.Index, src: SwitchProngSrc, }; @@ -29,18 +29,27 @@ pub fn deinit(self: *RangeSet) void { pub fn add( self: *RangeSet, - first: Value, - last: Value, - ty: Type, + first: InternPool.Index, + last: InternPool.Index, src: SwitchProngSrc, ) !?SwitchProngSrc { + const mod = self.module; + const ip = &mod.intern_pool; + + const ty = ip.typeOf(first); + assert(ty == ip.typeOf(last)); + for (self.ranges.items) |range| { - if (last.compareScalar(.gte, range.first, ty, self.module) and - first.compareScalar(.lte, range.last, ty, self.module)) + assert(ty == ip.typeOf(range.first)); + assert(ty == ip.typeOf(range.last)); + + if (last.toValue().compareScalar(.gte, range.first.toValue(), ty.toType(), mod) and + first.toValue().compareScalar(.lte, range.last.toValue(), ty.toType(), mod)) { return range.src; // They overlap. 
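The overlap check in RangeSet.add above is the standard closed-interval intersection test: [a_first, a_last] and [b_first, b_last] intersect exactly when a_last >= b_first and a_first <= b_last, that is, when neither range lies wholly before the other. Over plain integers instead of interned values:

    fn overlaps(a_first: i64, a_last: i64, b_first: i64, b_last: i64) bool {
        return a_last >= b_first and a_first <= b_last;
    }

So [0, 3] and [3, 5] overlap (they share 3), while [0, 3] and [4, 5] do not; the latter pair is what `spans` looks for when it checks prev.last + 1 == cur.first.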
} } + try self.ranges.append(.{ .first = first, .last = last, @@ -49,30 +58,29 @@ pub fn add( return null; } -const LessThanContext = struct { ty: Type, module: *Module }; - /// Assumes a and b do not overlap -fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool { - return a.first.compareScalar(.lt, b.first, ctx.ty, ctx.module); +fn lessThan(mod: *Module, a: Range, b: Range) bool { + const ty = mod.intern_pool.typeOf(a.first).toType(); + return a.first.toValue().compareScalar(.lt, b.first.toValue(), ty, mod); } -pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { +pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool { + const mod = self.module; + const ip = &mod.intern_pool; + assert(ip.typeOf(first) == ip.typeOf(last)); + if (self.ranges.items.len == 0) return false; - const mod = self.module; - std.mem.sort(Range, self.ranges.items, LessThanContext{ - .ty = ty, - .module = mod, - }, lessThan); + std.mem.sort(Range, self.ranges.items, mod, lessThan); - if (!self.ranges.items[0].first.eql(first, ty, mod) or - !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, mod)) + if (self.ranges.items[0].first != first or + self.ranges.items[self.ranges.items.len - 1].last != last) { return false; } - var space: Value.BigIntSpace = undefined; + var space: InternPool.Key.Int.Storage.BigIntSpace = undefined; var counter = try std.math.big.int.Managed.init(self.ranges.allocator); defer counter.deinit(); @@ -83,10 +91,10 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { const prev = self.ranges.items[i]; // prev.last + 1 == cur.first - try counter.copy(prev.last.toBigInt(&space, mod)); + try counter.copy(prev.last.toValue().toBigInt(&space, mod)); try counter.addScalar(&counter, 1); - const cur_start_int = cur.first.toBigInt(&space, mod); + const cur_start_int = cur.first.toValue().toBigInt(&space, mod); if (!cur_start_int.eq(counter.toConst())) { return false; } diff --git a/src/Sema.zig b/src/Sema.zig index c3512985113e..61061279e427 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1609,7 +1609,7 @@ fn analyzeBodyInner( if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - const inline_body = if (cond.val.toBool(mod)) then_body else else_body; + const inline_body = if (cond.val.toBool()) then_body else else_body; try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src); const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1630,7 +1630,7 @@ fn analyzeBodyInner( if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - const inline_body = if (cond.val.toBool(mod)) then_body else else_body; + const inline_body = if (cond.val.toBool()) then_body else else_body; try sema.maybeErrorUnwrapCondbr(block, inline_body, extra.data.condition, cond_src); const old_runtime_index = block.runtime_index; @@ -1663,7 +1663,7 @@ fn analyzeBodyInner( if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_val.toBool(mod)) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1689,7 +1689,7 @@ fn analyzeBodyInner( if (err == error.AnalysisFail and block.comptime_reason 
!= null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_val.toBool(mod)) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1778,12 +1778,11 @@ fn resolveConstBool( zir_ref: Zir.Inst.Ref, reason: []const u8, ) !bool { - const mod = sema.mod; const air_inst = try sema.resolveInst(zir_ref); const wanted_type = Type.bool; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, reason); - return val.toBool(mod); + return val.toBool(); } pub fn resolveConstString( @@ -2488,7 +2487,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE defer anon_decl.deinit(); const decl_index = try anon_decl.finish( pointee_ty, - Value.undef, + (try mod.intern(.{ .undef = pointee_ty.toIntern() })).toValue(), alignment.toByteUnits(0), ); sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.decl_index = decl_index; @@ -2611,7 +2610,7 @@ fn coerceResultPtr( .@"addrspace" = addr_space, }); if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| { - new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val); + new_ptr = try sema.addConstant(ptr_operand_ty, try mod.getCoerced(ptr_val, ptr_operand_ty)); } else { new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null); } @@ -3613,7 +3612,7 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai // Detect if a comptime value simply needs to have its type changed. if (try sema.resolveMaybeUndefVal(alloc)) |val| { - return sema.addConstant(const_ptr_ty, val); + return sema.addConstant(const_ptr_ty, try mod.getCoerced(val, const_ptr_ty)); } return block.addBitCast(const_ptr_ty, alloc); @@ -3735,6 +3734,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try mod.declareDeclDependency(sema.owner_decl_index, decl_index); const decl = mod.declPtr(decl_index); + if (iac.is_const) try decl.intern(mod); const final_elem_ty = decl.ty; const final_ptr_ty = try mod.ptrType(.{ .elem_type = final_elem_ty.toIntern(), @@ -3774,7 +3774,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Detect if the value is comptime-known. 
In such case, the // last 3 AIR instructions of the block will look like this: // - // %a = interned + // %a = inferred_alloc // %b = bitcast(%a) // %c = store(%b, %d) // @@ -3814,22 +3814,22 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } }; - const const_inst = while (true) { + while (true) { if (search_index == 0) break :ct; search_index -= 1; const candidate = block.instructions.items[search_index]; + if (candidate == ptr_inst) break; switch (air_tags[candidate]) { .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue, - .interned => break candidate, else => break :ct, } - }; + } const store_op = air_datas[store_inst].bin_op; const store_val = (try sema.resolveMaybeUndefVal(store_op.rhs)) orelse break :ct; if (store_op.lhs != Air.indexToRef(bitcast_inst)) break :ct; - if (air_datas[bitcast_inst].ty_op.operand != Air.indexToRef(const_inst)) break :ct; + if (air_datas[bitcast_inst].ty_op.operand != ptr) break :ct; const new_decl_index = d: { var anon_decl = try block.startAnonDecl(); @@ -3850,7 +3850,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com sema.air_instructions.set(ptr_inst, .{ .tag = .interned, .data = .{ .interned = try mod.intern(.{ .ptr = .{ - .ty = final_elem_ty.toIntern(), + .ty = final_ptr_ty.toIntern(), .addr = .{ .decl = new_decl_index }, } }) }, }); @@ -4707,15 +4707,23 @@ fn zirValidateArrayInit( return; } + // If the array has one possible value, the value is always comptime-known. + if (try sema.typeHasOnePossibleValue(array_ty)) |array_opv| { + const array_init = try sema.addConstant(array_ty, array_opv); + try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); + return; + } + var array_is_comptime = true; var first_block_index = block.instructions.items.len; var make_runtime = false; // Collect the comptime element values in case the array literal ends up // being comptime-known. - const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); - const element_vals = try sema.arena.alloc(InternPool.Index, array_len_s); - const opt_opv = try sema.typeHasOnePossibleValue(array_ty); + const element_vals = try sema.arena.alloc( + InternPool.Index, + try sema.usizeCast(block, init_src, array_len), + ); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); @@ -4727,12 +4735,6 @@ fn zirValidateArrayInit( element_vals[i] = opv.toIntern(); continue; } - } else { - // Array has one possible value, so value is always comptime-known - if (opt_opv) |opv| { - element_vals[i] = opv.toIntern(); - continue; - } } const elem_ptr_air_ref = sema.inst_map.get(elem_ptr).?; @@ -4814,11 +4816,6 @@ fn zirValidateArrayInit( // Our task is to delete all the `elem_ptr` and `store` instructions, and insert // instead a single `store` to the array_ptr with a comptime struct value. - // Also to populate the sentinel value, if any. 
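The zirValidateArrayInit change above hinges on the one-possible-value (OPV) rule: if the array type admits exactly one value, the initializer is comptime-known before any element is inspected, so a single constant store replaces the per-element bookkeeping. One such type, for illustration:

    comptime {
        // [4]u0 is OPV: u0 has the single value 0, so the whole
        // array has exactly one possible value.
        const only: [4]u0 = .{ 0, 0, 0, 0 };
        _ = only;
    }

Relatedly, the sentinel is no longer copied into the element values here; with this patch, InternPool.get appends it from the type when the aggregate is interned.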
- if (array_ty.sentinel(mod)) |sentinel_val| { - element_vals[instrs.len] = sentinel_val.toIntern(); - } - block.instructions.shrinkRetainingCapacity(first_block_index); var array_val = try mod.intern(.{ .aggregate = .{ @@ -6259,7 +6256,7 @@ fn popErrorReturnTrace( if (operand != .none) { is_non_error_inst = try sema.analyzeIsNonErr(block, src, operand); if (try sema.resolveDefinedValue(block, src, is_non_error_inst)) |cond_val| - is_non_error = cond_val.toBool(mod); + is_non_error = cond_val.toBool(); } else is_non_error = true; // no operand means pop unconditionally if (is_non_error == true) { @@ -6873,14 +6870,15 @@ fn analyzeCall( // If it's a comptime function call, we need to memoize it as long as no external // comptime memory is mutated. - var memoized_call_key: Module.MemoizedCall.Key = undefined; + var memoized_call_key = Module.MemoizedCall.Key{ + .func = module_fn_index, + .args_index = @intCast(u32, mod.memoized_call_args.items.len), + .args_count = @intCast(u32, func_ty_info.param_types.len), + }; var delete_memoized_call_key = false; - defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args); + defer if (delete_memoized_call_key) mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); if (is_comptime_call) { - memoized_call_key = .{ - .func = module_fn_index, - .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len), - }; + try mod.memoized_call_args.ensureUnusedCapacity(gpa, memoized_call_key.args_count); delete_memoized_call_key = true; } @@ -6916,8 +6914,7 @@ fn analyzeCall( uncasted_args, is_comptime_call, &should_memoize, - memoized_call_key, - func_ty_info.param_types, + mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, ) catch |err| switch (err) { @@ -6934,8 +6931,7 @@ fn analyzeCall( uncasted_args, is_comptime_call, &should_memoize, - memoized_call_key, - func_ty_info.param_types, + mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, ); @@ -6988,9 +6984,19 @@ fn analyzeCall( // bug generating invalid LLVM IR. const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { - if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| { - break :res2 try sema.addConstant(fn_ret_ty, result.val); + const gop = try mod.memoized_calls.getOrPutContext( + gpa, + memoized_call_key, + .{ .args = &mod.memoized_call_args }, + ); + if (gop.found_existing) { + // We need to use the original memoized error set instead of fn_ret_ty. + const result = gop.value_ptr.*; + assert(result != .none); // recursive memoization? + break :res2 try sema.addConstant(mod.intern_pool.typeOf(result).toType(), result.toValue()); } + gop.value_ptr.* = .none; + delete_memoized_call_key = false; } const new_func_resolved_ty = try mod.funcType(new_fn_info); @@ -7049,26 +7055,10 @@ fn analyzeCall( if (should_memoize and is_comptime_call) { const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, ""); - - // TODO: check whether any external comptime memory was mutated by the - // comptime function call. If so, then do not memoize the call here. - // TODO: re-evaluate whether memoized_calls needs its own arena. I think - // it should be fine to use the Decl arena for the function. 
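For context on the memoization rework above: a Module.MemoizedCall.Key no longer owns a gpa-allocated slice of TypedValue arguments. It records an offset and count into one shared, append-only buffer of interned values (mod.memoized_call_args), which is why the error path can clean up with shrinkRetainingCapacity instead of a free. A simplified sketch of that layout, with stand-in element and field types:

    // Stand-ins: the real key holds a function index, and the shared buffer
    // holds InternPool.Index values rather than u32.
    const Key = struct { func: u32, args_index: u32, args_count: u32 };

    fn keyArgs(shared_args: []const u32, key: Key) []const u32 {
        // A key is only a view into the shared buffer; there is nothing to
        // free per key.
        return shared_args[key.args_index..][0..key.args_count];
    }
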
- { - var arena_allocator = std.heap.ArenaAllocator.init(gpa); - errdefer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - - for (memoized_call_key.args) |*arg| { - arg.* = try arg.*.copy(arena); - } - - try mod.memoized_calls.putContext(gpa, memoized_call_key, .{ - .val = try result_val.copy(arena), - .arena = arena_allocator.state, - }, .{ .module = mod }); - delete_memoized_call_key = false; - } + mod.memoized_calls.getPtrContext( + memoized_call_key, + .{ .args = &mod.memoized_call_args }, + ).?.* = try result_val.intern(fn_ret_ty, mod); } break :res2 result; @@ -7214,11 +7204,11 @@ fn analyzeInlineCallArg( uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, should_memoize: *bool, - memoized_call_key: Module.MemoizedCall.Key, raw_param_types: []const InternPool.Index, func_inst: Air.Inst.Ref, has_comptime_args: *bool, ) !void { + const mod = sema.mod; const zir_tags = sema.code.instructions.items(.tag); switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime => has_comptime_args.* = true, @@ -7276,11 +7266,8 @@ fn analyzeInlineCallArg( try sema.resolveLazyValue(arg_val); }, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod); - memoized_call_key.args[arg_i.*] = .{ - .ty = param_ty.toType(), - .val = arg_val, - }; + should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod); + mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(param_ty.toType(), mod)); } else { sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg); } @@ -7315,11 +7302,8 @@ fn analyzeInlineCallArg( try sema.resolveLazyValue(arg_val); }, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod); - memoized_call_key.args[arg_i.*] = .{ - .ty = sema.typeOf(uncasted_arg), - .val = arg_val, - }; + should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod); + mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(sema.typeOf(uncasted_arg), mod)); } else { if (zir_tags[inst] == .param_anytype_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); @@ -8279,7 +8263,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const int_tag_ty = try enum_tag_ty.intTagType(mod); if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { - return sema.addConstant(int_tag_ty, opv); + return sema.addConstant(int_tag_ty, try mod.getCoerced(opv, int_tag_ty)); } if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| { @@ -8310,7 +8294,10 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (dest_ty.isNonexhaustiveEnum(mod)) { const int_tag_ty = try dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { - return sema.addConstant(dest_ty, int_val); + return sema.addConstant(dest_ty, (try mod.intern(.{ .enum_tag = .{ + .ty = dest_ty.toIntern(), + .int = int_val.toIntern(), + } })).toValue()); } const msg = msg: { const msg = try sema.errMsg( @@ -8657,8 +8644,10 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - assert(val.getError(mod) != null); - return sema.addConstant(result_ty, val); + return sema.addConstant(result_ty, (try mod.intern(.{ .err = .{ + .ty = result_ty.toIntern(), + .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name, + } })).toValue()); 
} try sema.requireRuntimeBlock(block, src, null); @@ -10737,7 +10726,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, &range_set, item_ref, - operand_ty, src_node_offset, .{ .scalar = scalar_i }, ); @@ -10760,7 +10748,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, &range_set, item_ref, - operand_ty, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, ); @@ -10778,7 +10765,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError &range_set, item_first, item_last, - operand_ty, src_node_offset, .{ .range = .{ .prong = multi_i, .item = range_i } }, ); @@ -10792,7 +10778,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (operand_ty.zigTypeTag(mod) == .Int) { const min_int = try operand_ty.minInt(mod, operand_ty); const max_int = try operand_ty.maxInt(mod, operand_ty); - if (try range_set.spans(min_int, max_int, operand_ty)) { + if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) { if (special_prong == .@"else") { return sema.fail( block, @@ -10894,11 +10880,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError ); } - var seen_values = ValueSrcMap.initContext(gpa, .{ - .ty = operand_ty, - .mod = mod, - }); - defer seen_values.deinit(); + var seen_values = ValueSrcMap{}; + defer seen_values.deinit(gpa); var extra_index: usize = special.end; { @@ -11664,10 +11647,10 @@ const RangeSetUnhandledIterator = struct { it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; - if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { + if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first.toValue(), it.ty, it.sema.mod)) { return it.cur; } - it.cur = it.ranges[it.range_i].last; + it.cur = it.ranges[it.range_i].last.toValue(); } if (!it.first) { it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); @@ -11687,16 +11670,15 @@ fn resolveSwitchItemVal( switch_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, -) CompileError!TypedValue { +) CompileError!InternPool.Index { const mod = sema.mod; const item = try sema.resolveInst(item_ref); - const item_ty = sema.typeOf(item); // Constructing a LazySrcLoc is costly because we only have the switch AST node. // Only if we know for sure we need to report a compile error do we resolve the // full source locations. 
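The comment above describes a pattern that recurs throughout Sema: resolution is first attempted with a placeholder source location, and the costly LazySrcLoc is only materialized on the path that actually reports a compile error. Reduced to a self-contained sketch with invented helper names:

    const Error = error{ NeededSourceLocation, AnalysisFail };

    fn resolveCheap(x: u32) Error!u32 {
        if (x == 0) return error.NeededSourceLocation;
        return x;
    }

    fn resolveWithFullSrc(x: u32) Error!u32 {
        // Imagine constructing the full source location here before
        // re-running resolution to produce a good error message.
        return x;
    }

    fn resolveItem(x: u32) Error!u32 {
        return resolveCheap(x) catch |err| switch (err) {
            // Only the failure path pays for source-location construction.
            error.NeededSourceLocation => return resolveWithFullSrc(x),
            else => |e| return e,
        };
    }
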
if (sema.resolveConstValue(block, .unneeded, item, "")) |val| { try sema.resolveLazyValue(val); - return TypedValue{ .ty = item_ty, .val = val }; + return val.toIntern(); } else |err| switch (err) { error.NeededSourceLocation => { const src = switch_prong_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand); @@ -11713,18 +11695,17 @@ fn validateSwitchRange( range_set: *RangeSet, first_ref: Zir.Inst.Ref, last_ref: Zir.Inst.Ref, - operand_ty: Type, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { const mod = sema.mod; - const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; - const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; - if (first_val.compareScalar(.gt, last_val, operand_ty, mod)) { + const first = try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first); + const last = try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last); + if (first.toValue().compareScalar(.gt, last.toValue(), mod.intern_pool.typeOf(first).toType(), mod)) { const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first); return sema.fail(block, src, "range start value is greater than the end value", .{}); } - const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src); + const maybe_prev_src = try range_set.add(first, last, switch_prong_src); return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); } @@ -11733,12 +11714,11 @@ fn validateSwitchItem( block: *Block, range_set: *RangeSet, item_ref: Zir.Inst.Ref, - operand_ty: Type, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { - const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; - const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src); + const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); + const maybe_prev_src = try range_set.add(item, item, switch_prong_src); return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); } @@ -11751,9 +11731,11 @@ fn validateSwitchItemEnum( src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { - const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); - const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val, sema.mod) orelse { - const maybe_prev_src = try range_set.add(item_tv.val, item_tv.val, item_tv.ty, switch_prong_src); + const ip = &sema.mod.intern_pool; + const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); + const int = ip.indexToKey(item).enum_tag.int; + const field_index = ip.indexToKey(ip.typeOf(item)).enum_type.tagValueIndex(ip, int) orelse { + const maybe_prev_src = try range_set.add(int, int, switch_prong_src); return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); }; const maybe_prev_src = seen_fields[field_index]; @@ -11770,9 +11752,9 @@ fn validateSwitchItemError( switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { const ip = &sema.mod.intern_pool; - const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); + const item = try 
sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
     // TODO: Do I need to typecheck here?
-    const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.toIntern()).err.name);
+    const error_name = ip.stringToSlice(ip.indexToKey(item).err.name);
     const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev|
         prev.value
     else
@@ -11822,8 +11804,8 @@ fn validateSwitchItemBool(
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
     const mod = sema.mod;
-    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
-    if (item_val.toBool(mod)) {
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    if (item.toValue().toBool()) {
         true_count.* += 1;
     } else {
         false_count.* += 1;
@@ -11835,7 +11817,7 @@ fn validateSwitchItemBool(
     }
 }
 
-const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage);
+const ValueSrcMap = std.AutoHashMapUnmanaged(InternPool.Index, Module.SwitchProngSrc);
 
 fn validateSwitchItemSparse(
     sema: *Sema,
@@ -11845,8 +11827,8 @@ fn validateSwitchItemSparse(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
-    const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const kv = (try seen_values.fetchPut(sema.gpa, item, switch_prong_src)) orelse return;
     return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
 }
 
@@ -13047,8 +13029,6 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
 
     if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
-        const final_len_including_sent = result_len + @boolToInt(lhs_info.sentinel != null);
-
         const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
             (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
else @@ -13065,7 +13045,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); } - const element_vals = try sema.arena.alloc(InternPool.Index, final_len_including_sent); + const element_vals = try sema.arena.alloc(InternPool.Index, result_len); var elem_i: usize = 0; while (elem_i < result_len) { var lhs_i: usize = 0; @@ -13075,9 +13055,6 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai elem_i += 1; } } - if (lhs_info.sentinel) |sent_val| { - element_vals[result_len] = sent_val.toIntern(); - } break :v try mod.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), .storage = .{ .elems = element_vals }, @@ -14896,13 +14873,18 @@ fn analyzeArithmetic( .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), else => unreachable, }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } @@ -14916,7 +14898,7 @@ fn analyzeArithmetic( const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -15524,7 +15506,7 @@ fn cmpSelf( } else { if (resolved_type.zigTypeTag(mod) == .Bool) { // We can lower bool eq/neq more efficiently. 
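runtimeBoolCmp exists because comparing a runtime bool against a comptime-known bool needs no comparison instruction at all: x == true is just x, and x == false is !x, and likewise for !=. The identities the lowering relies on:

    const std = @import("std");

    test "bool eq/neq against a known value reduces to the value or its negation" {
        var x: bool = true; // stand-in for a runtime-known operand
        try std.testing.expect((x == true) == x);
        try std.testing.expect((x == false) == !x);
        try std.testing.expect((x != false) == x);
    }
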
- return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(mod), rhs_src); + return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src); } break :src rhs_src; } @@ -15534,7 +15516,7 @@ fn cmpSelf( if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); - return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(mod), lhs_src); + return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src); } } break :src lhs_src; @@ -15840,6 +15822,7 @@ fn zirBuiltinSrc( break :blk try mod.intern(.{ .ptr = .{ .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; @@ -15864,6 +15847,7 @@ fn zirBuiltinSrc( break :blk try mod.intern(.{ .ptr = .{ .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; @@ -16314,6 +16298,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :v try mod.intern(.{ .ptr = .{ .ty = slice_errors_ty.toIntern(), .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, vals.len)).toIntern(), } }); } else .none; const errors_val = try mod.intern(.{ .opt = .{ @@ -16438,6 +16423,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .is_const = true, })).toIntern(), .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(), } }); }; @@ -17141,7 +17127,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (try sema.resolveMaybeUndefVal(operand)) |val| { return if (val.isUndef(mod)) sema.addConstUndef(Type.bool) - else if (val.toBool(mod)) + else if (val.toBool()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true; @@ -17169,9 +17155,9 @@ fn zirBoolBr( const gpa = sema.gpa; if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { - if (is_bool_or and lhs_val.toBool(mod)) { + if (is_bool_or and lhs_val.toBool()) { return Air.Inst.Ref.bool_true; - } else if (!is_bool_or and !lhs_val.toBool(mod)) { + } else if (!is_bool_or and !lhs_val.toBool()) { return Air.Inst.Ref.bool_false; } // comptime-known left-hand side. No need for a block here; the result @@ -17215,9 +17201,9 @@ fn zirBoolBr( const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst); if (!sema.typeOf(rhs_result).isNoReturn(mod)) { if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| { - if (is_bool_or and rhs_val.toBool(mod)) { + if (is_bool_or and rhs_val.toBool()) { return Air.Inst.Ref.bool_true; - } else if (!is_bool_or and !rhs_val.toBool(mod)) { + } else if (!is_bool_or and !rhs_val.toBool()) { return Air.Inst.Ref.bool_false; } } @@ -17371,7 +17357,7 @@ fn zirCondbr( const cond = try sema.coerce(parent_block, Type.bool, uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, cond_src, cond)) |cond_val| { - const body = if (cond_val.toBool(mod)) then_body else else_body; + const body = if (cond_val.toBool()) then_body else else_body; try sema.maybeErrorUnwrapCondbr(parent_block, body, extra.data.condition, cond_src); // We use `analyzeBodyInner` since we want to propagate any possible @@ -17444,7 +17430,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! 
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); if (is_non_err != .none) { const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?; - if (is_non_err_val.toBool(mod)) { + if (is_non_err_val.toBool()) { return sema.analyzeErrUnionPayload(parent_block, src, err_union_ty, err_union, operand_src, false); } // We can analyze the body directly in the parent block because we know there are @@ -17491,7 +17477,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); if (is_non_err != .none) { const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?; - if (is_non_err_val.toBool(mod)) { + if (is_non_err_val.toBool()) { return sema.analyzeErrUnionPayloadPtr(parent_block, src, operand, false, false); } // We can analyze the body directly in the parent block because we know there are @@ -18858,7 +18844,7 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(Type.u1); - if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); + if (val.toBool()) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } return block.addUnOp(.bool_to_int, operand); @@ -19171,12 +19157,12 @@ fn zirReify( const ty = try mod.ptrType(.{ .size = ptr_size, - .is_const = is_const_val.toBool(mod), - .is_volatile = is_volatile_val.toBool(mod), + .is_const = is_const_val.toBool(), + .is_volatile = is_volatile_val.toBool(), .alignment = abi_align, .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val), .elem_type = elem_ty.toIntern(), - .is_allowzero = is_allowzero_val.toBool(mod), + .is_allowzero = is_allowzero_val.toBool(), .sentinel = actual_sentinel, }); return sema.addType(ty); @@ -19267,7 +19253,7 @@ fn zirReify( return sema.fail(block, src, "non-packed struct does not support backing integer type", .{}); } - return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool(mod)); + return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool()); }, .Enum => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); @@ -19305,7 +19291,7 @@ fn zirReify( .namespace = .none, .fields_len = fields_len, .has_values = true, - .tag_mode = if (!is_exhaustive_val.toBool(mod)) + .tag_mode = if (!is_exhaustive_val.toBool()) .nonexhaustive else .explicit, @@ -19619,12 +19605,12 @@ fn zirReify( const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("return_type").?); const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("params").?); - const is_generic = is_generic_val.toBool(mod); + const is_generic = is_generic_val.toBool(); if (is_generic) { return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{}); } - const is_var_args = is_var_args_val.toBool(mod); + const is_var_args = is_var_args_val.toBool(); const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val); if (is_var_args and cc != .C) { return sema.fail(block, src, "varargs functions must have C calling convention", .{}); @@ 
-19653,9 +19639,9 @@ fn zirReify( const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // is_generic: bool, - const arg_is_generic = arg_val[0].toBool(mod); + const arg_is_generic = arg_val[0].toBool(); // is_noalias: bool, - const arg_is_noalias = arg_val[1].toBool(mod); + const arg_is_noalias = arg_val[1].toBool(); // type: ?type, const param_type_opt_val = arg_val[2]; @@ -19783,9 +19769,9 @@ fn reifyStruct( if (layout == .Packed) { if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{}); - if (is_comptime_val.toBool(mod)) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}); + if (is_comptime_val.toBool()) return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}); } - if (layout == .Extern and is_comptime_val.toBool(mod)) { + if (layout == .Extern and is_comptime_val.toBool()) { return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}); } @@ -19827,7 +19813,7 @@ fn reifyStruct( opt_val; break :blk try payload_val.copy(new_decl_arena_allocator); } else Value.@"unreachable"; - if (is_comptime_val.toBool(mod) and default_val.toIntern() == .unreachable_value) { + if (is_comptime_val.toBool() and default_val.toIntern() == .unreachable_value) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } @@ -19836,7 +19822,7 @@ fn reifyStruct( .ty = field_ty, .abi_align = abi_align, .default_val = default_val, - .is_comptime = is_comptime_val.toBool(mod), + .is_comptime = is_comptime_val.toBool(), .offset = undefined, }; @@ -20400,13 +20386,17 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } - if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { - return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ - .ty = dest_ty.toIntern(), - .val = operand_val.toIntern(), - } })).toValue()); - } - return sema.addConstant(aligned_dest_ty, try mod.getCoerced(operand_val, aligned_dest_ty)); + return sema.addConstant(aligned_dest_ty, try mod.getCoerced(switch (mod.intern_pool.indexToKey(operand_val.toIntern())) { + .undef, .ptr => operand_val, + .opt => |opt| switch (opt.val) { + .none => if (dest_ty.ptrAllowsZero(mod)) + Value.zero_usize + else + return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}), + else => opt.val.toValue(), + }, + else => unreachable, + }, aligned_dest_ty)); } try sema.requireRuntimeBlock(block, src, null); @@ -20534,10 +20524,10 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (try sema.resolveMaybeUndefValIntable(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(dest_ty); if (!is_vector) { - return sema.addConstant( - dest_ty, + return sema.addConstant(dest_ty, try mod.getCoerced( try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod), - ); + dest_ty, + )); } const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod)); for (elems, 0..) 
|*elem, i| { @@ -21410,7 +21400,10 @@ fn zirCmpxchg( // special case zero bit types if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) { - return sema.addConstant(result_ty, Value.null); + return sema.addConstant(result_ty, (try mod.intern(.{ .opt = .{ + .ty = result_ty.toIntern(), + .val = .none, + } })).toValue()); } const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { @@ -21633,8 +21626,7 @@ fn analyzeShuffle( .{ b_len, b_src, b_ty }, }; - var i: usize = 0; - while (i < mask_len) : (i += 1) { + for (0..@intCast(usize, mask_len)) |i| { const elem = try mask.elemValue(sema.mod, i); if (elem.isUndef(mod)) continue; const int = elem.toSignedInt(mod); @@ -21670,7 +21662,7 @@ fn analyzeShuffle( if (try sema.resolveMaybeUndefVal(a)) |a_val| { if (try sema.resolveMaybeUndefVal(b)) |b_val| { const values = try sema.arena.alloc(InternPool.Index, mask_len); - for (values) |*value| { + for (values, 0..) |*value, i| { const mask_elem_val = try mask.elemValue(sema.mod, i); if (mask_elem_val.isUndef(mod)) { value.* = try mod.intern(.{ .undef = elem_ty.toIntern() }); @@ -21698,11 +21690,10 @@ fn analyzeShuffle( const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len)); const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); - i = 0; - while (i < min_len) : (i += 1) { + for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| { expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern(); } - while (i < max_len) : (i += 1) { + for (@intCast(usize, min_len)..@intCast(usize, max_len)) |i| { expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern(); } const expand_mask = try mod.intern(.{ .aggregate = .{ @@ -21783,7 +21774,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const elems = try sema.gpa.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { const pred_elem_val = try pred_val.elemValue(mod, i); - const should_choose_a = pred_elem_val.toBool(mod); + const should_choose_a = pred_elem_val.toBool(); elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod); } @@ -22853,15 +22844,15 @@ fn zirVarExtended( else uncasted_init; - break :blk (try sema.resolveMaybeUndefVal(init)) orelse - return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known"); - } else Value.@"unreachable"; + break :blk ((try sema.resolveMaybeUndefVal(init)) orelse + return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known")).toIntern(); + } else .none; try sema.validateVarType(block, ty_src, var_ty, small.is_extern); return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{ .ty = var_ty.toIntern(), - .init = init_val.toIntern(), + .init = init_val, .decl = sema.owner_decl_index, .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString( sema.gpa, @@ -23284,7 +23275,7 @@ fn resolveExternOptions( .name = name, .library_name = library_name, .linkage = linkage, - .is_thread_local = is_thread_local_val.toBool(mod), + .is_thread_local = is_thread_local_val.toBool(), }; } @@ -26190,7 +26181,7 @@ fn coerceExtra( .addr = .{ .int = (if (dest_info.@"align" != 0) try mod.intValue(Type.usize, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(mod)).toIntern() }, + try mod.getCoerced(try dest_info.pointee_type.lazyAbiAlignment(mod), Type.usize)).toIntern() }, .len = (try mod.intValue(Type.usize, 0)).toIntern(), } })).toValue()); } @@ -27785,7 +27776,7 @@ fn beginComptimePtrMutation( const payload = try arena.create(Value.Payload.SubValue); payload.* = .{ .base = .{ .tag = .eu_payload }, - .data = Value.undef, + .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), }; val_ptr.* = Value.initPayload(&payload.base); @@ -27824,7 +27815,7 @@ fn beginComptimePtrMutation( const payload = try arena.create(Value.Payload.SubValue); payload.* = .{ .base = .{ .tag = .opt_payload }, - .data = Value.undef, + .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), }; val_ptr.* = Value.initPayload(&payload.base); @@ -27898,30 +27889,6 @@ fn beginComptimePtrMutation( } switch (val_ptr.ip_index) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.undef); - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.mut_decl, - ); - }, .none => switch (val_ptr.tag()) { .bytes => { // An array is memory-optimized to store a slice of bytes, but we are about @@ -27999,7 +27966,33 @@ fn beginComptimePtrMutation( else => unreachable, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) { + .undef => { + // An array has been initialized to undefined at comptime and now we + // are for the first time setting an element. 
We must change the representation + // of the array from `undef` to `array`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + @memset(elems, (try mod.intern(.{ .undef = elem_ty.toIntern() })).toValue()); + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + else => unreachable, + }, } }, else => { @@ -28052,83 +28045,6 @@ fn beginComptimePtrMutation( var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty); switch (parent.pointee) { .direct => |val_ptr| switch (val_ptr.ip_index) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - switch (parent.ty.zigTypeTag(mod)) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(fields, Value.undef); - - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &fields[field_index], - ptr_elem_ty, - parent.mut_decl, - ); - }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - const tag_ty = parent.ty.unionTagTypeHypothetical(mod); - payload.* = .{ .data = .{ - .tag = try mod.enumValueFieldIndex(tag_ty, field_index), - .val = Value.undef, - } }; - - val_ptr.* = Value.initPayload(&payload.base); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &payload.data.val, - ptr_elem_ty, - parent.mut_decl, - ); - }, - .Pointer => { - assert(parent.ty.isSlice(mod)); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.undef, - .len = Value.undef, - }); - - switch (field_index) { - Value.slice_ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.mut_decl, - ), - Value.slice_len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.mut_decl, - ), - - else => unreachable, - } - }, - else => unreachable, - } - }, .empty_struct => { const duped = try sema.arena.create(Value); duped.* = val_ptr.*; @@ -28210,10 +28126,92 @@ fn beginComptimePtrMutation( else => unreachable, }, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) { + .undef => { + // A struct or union has been initialized to undefined at comptime and now we + // are for the first time setting a field. We must change the representation + // of the struct/union from `undef` to `struct`/`union`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + switch (parent.ty.zigTypeTag(mod)) { + .Struct => { + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + for (fields, 0..) 
|*field, i| field.* = (try mod.intern(.{ + .undef = parent.ty.structFieldType(i, mod).toIntern(), + })).toValue(); + + val_ptr.* = try Value.Tag.aggregate.create(arena, fields); + + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &fields[field_index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .Union => { + const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); + const payload_ty = parent.ty.structFieldType(field_index, mod); + payload.* = .{ .data = .{ + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), + .val = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), + } }; + val_ptr.* = Value.initPayload(&payload.base); + + return beginComptimePtrMutationInner( + sema, + block, + src, + payload_ty, + &payload.data.val, + ptr_elem_ty, + parent.mut_decl, + ); + }, + .Pointer => { + assert(parent.ty.isSlice(mod)); + const ptr_ty = parent.ty.slicePtrFieldType(mod); + val_ptr.* = try Value.Tag.slice.create(arena, .{ + .ptr = (try mod.intern(.{ .undef = ptr_ty.toIntern() })).toValue(), + .len = (try mod.intern(.{ .undef = .usize_type })).toValue(), + }); + + switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + ptr_ty, + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), + + else => unreachable, + } + }, + else => unreachable, + } + }, else => unreachable, }, - else => unreachable, }, .reinterpret => |reinterpret| { const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod); @@ -28370,18 +28368,22 @@ fn beginComptimePtrLoad( (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; if (coerce_in_mem_ok) { - const payload_val = switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { - .error_union => |error_union| switch (error_union.val) { - .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}), - .payload => |payload| payload, - }, - .opt => |opt| switch (opt.val) { - .none => return sema.fail(block, src, "attempt to use null value", .{}), - else => opt.val, - }, - else => unreachable, + const payload_val = switch (tv.val.ip_index) { + .none => tv.val.cast(Value.Payload.SubValue).?.data, + .null_value => return sema.fail(block, src, "attempt to use null value", .{}), + else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}), + .payload => |payload| payload, + }, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => opt.val, + }, + else => unreachable, + }.toValue(), }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val.toValue() }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; break :blk deref; } } @@ -28960,7 +28962,7 @@ fn coerceArrayLike( if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(inst)) |inst_val| { // These types share the same comptime value representation. 
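The getCoerced call introduced just below is a recurring theme of this patch: the comptime value's representation is already correct for the destination type, but interned values now carry their exact type and must be re-tagged rather than reused as-is. The kind of coercion being handled:

    const std = @import("std");

    test "comptime-known array coerces to a vector with no element conversion" {
        const arr: [4]u8 = .{ 1, 2, 3, 4 };
        const vec: @Vector(4, u8) = arr; // same element representation, new type
        try std.testing.expect(vec[2] == 3);
    }
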
- return sema.addConstant(dest_ty, inst_val); + return sema.addConstant(dest_ty, try mod.getCoerced(inst_val, dest_ty)); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); @@ -29024,7 +29026,7 @@ fn coerceTupleToArray( return sema.failWithOwnedErrorMsg(msg); } - const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod)); + const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_len); const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems); const dest_elem_ty = dest_ty.childType(mod); @@ -29430,7 +29432,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const ptr_ty = try mod.ptrType(.{ .elem_type = decl_tv.ty.toIntern(), .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), - .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else false, + .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true, .address_space = decl.@"addrspace", }); if (analyze_fn_body) { @@ -29513,7 +29515,7 @@ fn analyzeLoad( if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| { - return sema.addConstant(elem_ty, elem_val); + return sema.addConstant(elem_ty, try mod.getCoerced(elem_val, elem_ty)); } } @@ -32610,8 +32612,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { var int_tag_ty: Type = undefined; var enum_field_names: []InternPool.NullTerminatedString = &.{}; - var enum_field_vals: []InternPool.Index = &.{}; - var enum_field_vals_map: std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false) = .{}; + var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; var explicit_tags_seen: []bool = &.{}; var explicit_enum_info: ?InternPool.Key.EnumType = null; if (tag_type_ref != .none) { @@ -32638,9 +32639,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; return sema.failWithOwnedErrorMsg(msg); } + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); + try enum_field_vals.ensureTotalCapacity(sema.arena, fields_len); } - enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); - enum_field_vals = try sema.arena.alloc(InternPool.Index, fields_len); } else { // The provided type is the enum tag type. 
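This branch of semaUnionFields handles union(SomeEnum), where the tag type is supplied by the user rather than generated; the explicit_tags_seen bookkeeping that follows checks each union field against the provided enum. The language feature being analyzed:

    const std = @import("std");

    const Tag = enum(u8) { a, b };
    const U = union(Tag) { a: u32, b: void };

    test "union with an explicitly provided tag enum" {
        const u = U{ .a = 1 };
        try std.testing.expect(u == .a);
        try std.testing.expect(@enumToInt(@as(Tag, u)) == 0);
    }
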
union_obj.tag_ty = provided_ty; @@ -32712,8 +32713,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk try sema.resolveInst(tag_ref); } else .none; - if (enum_field_vals.len != 0) { - const copied_val = if (tag_ref != .none) blk: { + if (enum_field_vals.capacity() > 0) { + const enum_tag_val = if (tag_ref != .none) blk: { const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) { error.NeededSourceLocation => { const val_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ @@ -32737,16 +32738,12 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk val; }; - enum_field_vals[field_i] = copied_val.toIntern(); - const gop = enum_field_vals_map.getOrPutAssumeCapacityContext(copied_val, .{ - .ty = int_tag_ty, - .mod = mod, - }); + const gop = enum_field_vals.getOrPutAssumeCapacity(enum_tag_val.toIntern()); if (gop.found_existing) { const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy; const msg = msg: { - const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, mod)}); + const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(int_tag_ty, mod)}); errdefer msg.destroy(gpa); try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -32907,8 +32904,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; return sema.failWithOwnedErrorMsg(msg); } - } else if (enum_field_vals.len != 0) { - union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals, union_obj); + } else if (enum_field_vals.count() > 0) { + union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), union_obj); } else { union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_obj); } @@ -33180,8 +33177,12 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .struct_type => |struct_type| { const resolved_ty = try sema.resolveTypeFields(ty); if (mod.structPtrUnwrap(struct_type.index)) |s| { - for (s.fields.values(), 0..) |field, i| { - if (field.is_comptime) continue; + const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count()); + for (field_vals, s.fields.values(), 0..) |*field_val, field, i| { + if (field.is_comptime) { + field_val.* = try field.default_val.intern(field.ty, mod); + continue; + } if (field.ty.eql(resolved_ty, sema.mod)) { const msg = try Module.ErrorMsg.create( sema.gpa, @@ -33192,24 +33193,25 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); return sema.failWithOwnedErrorMsg(msg); } - if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { - return null; - } + if (try sema.typeHasOnePossibleValue(field.ty)) |field_opv| { + field_val.* = try field_opv.intern(field.ty, mod); + } else return null; } + + // In this case the struct has no runtime-known fields and + // therefore has one possible value. 
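The rewritten loop above resolves the TODO that is deleted just below: comptime fields are no longer skipped when deciding whether a struct has one possible value; their fixed default values are interned into the resulting aggregate. The observable rule (types invented for the example):

    const std = @import("std");

    test "comptime fields keep their fixed value in a zero-size struct" {
        const S = struct { comptime tag: u8 = 7, rest: u0 = 0 };
        comptime std.debug.assert(@sizeOf(S) == 0);
        const s: S = .{};
        try std.testing.expect(s.tag == 7);
    }
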
+ return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); } - // In this case the struct has no runtime-known fields and - // therefore has one possible value. - // TODO: this is incorrect for structs with comptime fields, I think - // we should use a temporary allocator to construct an aggregate that - // is populated with the comptime values and then intern that value here. - // This TODO is repeated in the redundant implementation of - // one-possible-value in type.zig. - const empty = try mod.intern(.{ .aggregate = .{ + // In this case the struct has no fields at all and + // therefore has one possible value. + return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, - } }); - return empty.toValue(); + } })).toValue(); }, .anon_struct_type => |tuple| { @@ -33268,20 +33270,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .auto, .explicit => switch (enum_type.names.len) { 0 => return Value.@"unreachable", - 1 => { - if (enum_type.values.len == 0) { - const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = try mod.intern(.{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = 0 }, - } }), - } }); - return only.toValue(); - } else { - return enum_type.values[0].toValue(); - } - }, + 1 => return try mod.getCoerced((if (enum_type.values.len == 0) + try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }) + else + enum_type.values[0]).toValue(), ty), else => return null, }, }, @@ -33427,7 +33422,7 @@ fn analyzeComptimeAlloc( // There will be stores before the first load, but they may be to sub-elements or // sub-fields. So we need to initialize with undef to allow the mechanism to expand // into fields/elements and have those overridden with stored values. 
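The undef initializer described above (and switched just below from the legacy Value.undef to an intern-pool undef of the variable's type) is what lets later comptime code write through the pointer one field or element at a time; beginComptimePtrMutation expands the undef aggregate on the first such store. The behavior this machinery supports:

    const std = @import("std");

    test "a comptime alloc may be initialized piecemeal" {
        comptime {
            var p: struct { x: u32, y: u32 } = undefined;
            p.x = 1; // first store expands the undef aggregate
            p.y = 2;
            std.debug.assert(p.x + p.y == 3);
        }
    }
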
- Value.undef, + (try sema.mod.intern(.{ .undef = var_type.toIntern() })).toValue(), alignment, ); const decl = sema.mod.declPtr(decl_index); @@ -34028,16 +34023,16 @@ fn intSubWithOverflow( const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); - of.* = try of_math_result.overflow_bit.intern(Type.bool, mod); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return Value.OverflowArithmeticResult{ .overflow_bit = (try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, } })).toValue(), .wrapped_result = (try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })).toValue(), }; @@ -34066,7 +34061,7 @@ fn intSubWithOverflowScalar( const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ - .overflow_bit = Value.boolToInt(overflowed), + .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)), .wrapped_result = wrapped_result, }; } @@ -34273,16 +34268,16 @@ fn intAddWithOverflow( const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); - of.* = try of_math_result.overflow_bit.intern(Type.bool, mod); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return Value.OverflowArithmeticResult{ .overflow_bit = (try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, } })).toValue(), .wrapped_result = (try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })).toValue(), }; @@ -34311,7 +34306,7 @@ fn intAddWithOverflowScalar( const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); const result = try mod.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ - .overflow_bit = Value.boolToInt(overflowed), + .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)), .wrapped_result = result, }; } @@ -34384,7 +34379,7 @@ fn compareVector( scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod); } return (try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .u1_type })).toIntern(), + .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(), .storage = .{ .elems = result_data }, } })).toValue(); } diff --git a/src/codegen.zig b/src/codegen.zig index 1ae6d6ce0626..30ad8ab6e846 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -957,7 +957,7 @@ pub fn genTypedValue( } }, .Bool => { - return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool(mod)) }); + return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) }); }, .Optional => { if 
(typed_value.ty.isPtrLikeOptional(mod)) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 1d3749f6a3d6..41db3e7a0485 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2003,7 +2003,7 @@ pub const Object = struct { mod.intern_pool.stringToSlice(tuple.names[i]) else try std.fmt.allocPrintZ(gpa, "{d}", .{i}); - defer gpa.free(field_name); + defer if (tuple.names.len == 0) gpa.free(field_name); try di_fields.append(gpa, dib.createMemberType( fwd_decl.toScope(), @@ -2461,13 +2461,13 @@ pub const DeclGen = struct { if (decl.@"linksection") |section| global.setSection(section); assert(decl.has_tv); const init_val = if (decl.val.getVariable(mod)) |variable| init_val: { - break :init_val variable.init.toValue(); + break :init_val variable.init; } else init_val: { global.setGlobalConstant(.True); - break :init_val decl.val; + break :init_val decl.val.toIntern(); }; - if (init_val.toIntern() != .unreachable_value) { - const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val }); + if (init_val != .none) { + const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val.toValue() }); if (global.globalGetValueType() == llvm_init.typeOf()) { global.setInitializer(llvm_init); } else { @@ -2748,7 +2748,7 @@ pub const DeclGen = struct { if (std.debug.runtime_safety and false) check: { if (t.zigTypeTag(mod) == .Opaque) break :check; if (!t.hasRuntimeBits(mod)) break :check; - if (!llvm_ty.isSized().toBool(mod)) break :check; + if (!llvm_ty.isSized().toBool()) break :check; const zig_size = t.abiSize(mod); const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty); @@ -3239,7 +3239,7 @@ pub const DeclGen = struct { => unreachable, // non-runtime values .false, .true => { const llvm_type = try dg.lowerType(tv.ty); - return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); + return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); }, }, .variable, @@ -3522,15 +3522,19 @@ pub const DeclGen = struct { const elem_ty = vector_type.child.toType(); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len); defer dg.gpa.free(llvm_elems); + const llvm_i8 = dg.context.intType(8); for (llvm_elems, 0..) |*llvm_elem, i| { - llvm_elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = switch (aggregate.storage) { - .bytes => unreachable, - .elems => |elems| elems[i], - .repeated_elem => |elem| elem, - }.toValue(), - }); + llvm_elem.* = switch (aggregate.storage) { + .bytes => |bytes| llvm_i8.constInt(bytes[i], .False), + .elems => |elems| try dg.lowerValue(.{ + .ty = elem_ty, + .val = elems[i].toValue(), + }), + .repeated_elem => |elem| try dg.lowerValue(.{ + .ty = elem_ty, + .val = elem.toValue(), + }), + }; } return llvm.constVector( llvm_elems.ptr, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 64a0a7ec57bc..94ea8b7f89ed 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -654,7 +654,7 @@ pub const DeclGen = struct { .@"unreachable", .generic_poison, => unreachable, // non-runtime values - .false, .true => try self.addConstBool(val.toBool(mod)), + .false, .true => try self.addConstBool(val.toBool()), }, .variable, .extern_func, @@ -974,7 +974,6 @@ pub const DeclGen = struct { /// This function should only be called during function code generation. 
 fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef {
         const mod = self.module;
-        const target = self.getTarget();
         const result_ty_ref = try self.resolveType(ty, repr);
         log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) });
@@ -991,51 +990,11 @@ pub const DeclGen = struct {
                     return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod));
                 }
             },
-            .Bool => switch (repr) {
-                .direct => return try self.spv.constBool(result_ty_ref, val.toBool(mod)),
-                .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool(mod))),
-            },
-            .Float => return switch (ty.floatBits(target)) {
-                16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16, mod) } } }),
-                32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32, mod) } } }),
-                64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64, mod) } } }),
-                80, 128 => unreachable, // TODO
-                else => unreachable,
-            },
-            .ErrorSet => {
-                const value = switch (val.tag()) {
-                    .@"error" => blk: {
-                        const err_name = val.castTag(.@"error").?.data.name;
-                        const kv = try self.module.getErrorValue(err_name);
-                        break :blk @intCast(u16, kv.value);
-                    },
-                    .zero => 0,
-                    else => unreachable,
-                };
-
-                return try self.spv.constInt(result_ty_ref, value);
-            },
-            .ErrorUnion => {
-                const payload_ty = ty.errorUnionPayload();
-                const is_pl = val.errorUnionIsPayload();
-                const error_val = if (!is_pl) val else Value.initTag(.zero);
-
-                const eu_layout = self.errorUnionLayout(payload_ty);
-                if (!eu_layout.payload_has_bits) {
-                    return try self.constant(Type.anyerror, error_val, repr);
-                }
-
-                const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef;
-
-                var members: [2]IdRef = undefined;
-                if (eu_layout.error_first) {
-                    members[0] = try self.constant(Type.anyerror, error_val, .indirect);
-                    members[1] = try self.constant(payload_ty, payload_val, .indirect);
-                } else {
-                    members[0] = try self.constant(payload_ty, payload_val, .indirect);
-                    members[1] = try self.constant(Type.anyerror, error_val, .indirect);
-                }
-                return try self.spv.constComposite(result_ty_ref, &members);
+            .Bool => switch (repr) {
+                // Bool lowering preserved across the transition; only the
+                // toBool signature changed in this commit.
+                .direct => return try self.spv.constBool(result_ty_ref, val.toBool()),
+                .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool())),
             },
             // TODO: We can handle most pointers here (decl refs etc), because now they emit an extra
             // OpVariable that is not really required.
diff --git a/src/type.zig b/src/type.zig
index a9ad8b94fd64..ebf331ef8893 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2481,25 +2481,32 @@ pub const Type = struct {
             .struct_type => |struct_type| {
                 if (mod.structPtrUnwrap(struct_type.index)) |s| {
                     assert(s.haveFieldTypes());
-                    for (s.fields.values()) |field| {
-                        if (field.is_comptime) continue;
-                        if ((try field.ty.onePossibleValue(mod)) != null) continue;
-                        return null;
+                    const field_vals = try mod.gpa.alloc(InternPool.Index, s.fields.count());
+                    defer mod.gpa.free(field_vals);
+                    for (field_vals, s.fields.values()) |*field_val, field| {
+                        if (field.is_comptime) {
+                            field_val.* = try field.default_val.intern(field.ty, mod);
+                            continue;
+                        }
+                        if (try field.ty.onePossibleValue(mod)) |field_opv| {
+                            field_val.* = try field_opv.intern(field.ty, mod);
+                        } else return null;
                     }
+
+                    // In this case the struct has no runtime-known fields and
+                    // therefore has one possible value.
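This is the same one-possible-value rewrite as in Sema.zig earlier in the patch; the two implementations are deliberately kept in step. The observable rule for the simple all-zero-bit case:

    const std = @import("std");

    test "a struct of one-possible-value fields has one possible value" {
        const S = struct { a: u0, b: void };
        comptime std.debug.assert(@sizeOf(S) == 0);
        const s: S = .{ .a = 0, .b = {} };
        try std.testing.expect(s.a == 0);
    }
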
+ return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); } - // In this case the struct has no runtime-known fields and - // therefore has one possible value. - // TODO: this is incorrect for structs with comptime fields, I think - // we should use a temporary allocator to construct an aggregate that - // is populated with the comptime values and then intern that value here. - // This TODO is repeated in the redundant implementation of - // one-possible-value logic in Sema.zig. - const empty = try mod.intern(.{ .aggregate = .{ + // In this case the struct has no fields at all and + // therefore has one possible value. + return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, - } }); - return empty.toValue(); + } })).toValue(); }, .anon_struct_type => |tuple| { diff --git a/src/value.zig b/src/value.zig index 23b90f40df6c..dda95cbb441d 100644 --- a/src/value.zig +++ b/src/value.zig @@ -385,7 +385,7 @@ pub const Value = struct { } }); }, .aggregate => { - const old_elems = val.castTag(.aggregate).?.data; + const old_elems = val.castTag(.aggregate).?.data[0..ty.arrayLen(mod)]; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); defer mod.gpa.free(new_elems); const ty_key = mod.intern_pool.indexToKey(ty.toIntern()); @@ -656,7 +656,7 @@ pub const Value = struct { }; } - pub fn toBool(val: Value, _: *const Module) bool { + pub fn toBool(val: Value) bool { return switch (val.toIntern()) { .bool_true => true, .bool_false => false, @@ -697,7 +697,7 @@ pub const Value = struct { switch (ty.zigTypeTag(mod)) { .Void => {}, .Bool => { - buffer[0] = @boolToInt(val.toBool(mod)); + buffer[0] = @boolToInt(val.toBool()); }, .Int, .Enum => { const int_info = ty.intInfo(mod); @@ -736,13 +736,20 @@ pub const Value = struct { }, .Struct => switch (ty.containerLayout(mod)) { .Auto => return error.IllDefinedMemoryLayout, - .Extern => { - const fields = ty.structFields(mod).values(); - const field_vals = val.castTag(.aggregate).?.data; - for (fields, 0..) |field, i| { - const off = @intCast(usize, ty.structFieldOffset(i, mod)); - try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]); - } + .Extern => for (ty.structFields(mod).values(), 0..) 
|field, i| { + const off = @intCast(usize, ty.structFieldOffset(i, mod)); + const field_val = switch (val.ip_index) { + .none => val.castTag(.aggregate).?.data[i], + else => switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) { + .bytes => |bytes| { + buffer[off] = bytes[i]; + continue; + }, + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }.toValue(), + }; + try writeToMemory(field_val, field.ty, mod, buffer[off..]); }, .Packed => { const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; @@ -812,7 +819,7 @@ pub const Value = struct { .Little => bit_offset / 8, .Big => buffer.len - bit_offset / 8 - 1, }; - if (val.toBool(mod)) { + if (val.toBool()) { buffer[byte_index] |= (@as(u8, 1) << @intCast(u3, bit_offset % 8)); } else { buffer[byte_index] &= ~(@as(u8, 1) << @intCast(u3, bit_offset % 8)); @@ -1331,24 +1338,7 @@ pub const Value = struct { .gt => {}, } - const lhs_float = lhs.isFloat(mod); - const rhs_float = rhs.isFloat(mod); - if (lhs_float and rhs_float) { - const lhs_tag = lhs.tag(); - const rhs_tag = rhs.tag(); - if (lhs_tag == rhs_tag) { - const lhs_storage = mod.intern_pool.indexToKey(lhs.toIntern()).float.storage; - const rhs_storage = mod.intern_pool.indexToKey(rhs.toIntern()).float.storage; - const lhs128: f128 = switch (lhs_storage) { - inline else => |x| x, - }; - const rhs128: f128 = switch (rhs_storage) { - inline else => |x| x, - }; - return std.math.order(lhs128, rhs128); - } - } - if (lhs_float or rhs_float) { + if (lhs.isFloat(mod) or rhs.isFloat(mod)) { const lhs_f128 = lhs.toFloat(f128, mod); const rhs_f128 = rhs.toFloat(f128, mod); return std.math.order(lhs_f128, rhs_f128); @@ -1669,86 +1659,6 @@ pub const Value = struct { return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq); } - /// This function is used by hash maps and so treats floating-point NaNs as equal - /// to each other, and not equal to other floating-point values. - pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - if (val.ip_index != .none) { - // The InternPool data structure hashes based on Key to make interned objects - // unique. An Index can be treated simply as u32 value for the - // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, val.toIntern()); - return; - } - const zig_ty_tag = ty.zigTypeTag(mod); - std.hash.autoHash(hasher, zig_ty_tag); - if (val.isUndef(mod)) return; - // The value is runtime-known and shouldn't affect the hash. - if (val.isRuntimeValue(mod)) return; - - switch (zig_ty_tag) { - .Opaque => unreachable, // Cannot hash opaque types - - .Void, - .NoReturn, - .Undefined, - .Null, - => {}, - - .Type, - .Float, - .ComptimeFloat, - .Bool, - .Int, - .ComptimeInt, - .Pointer, - .Optional, - .ErrorUnion, - .ErrorSet, - .Enum, - .EnumLiteral, - .Fn, - => unreachable, // handled via ip_index check above - .Array, .Vector => { - const len = ty.arrayLen(mod); - const elem_ty = ty.childType(mod); - var index: usize = 0; - while (index < len) : (index += 1) { - const elem_val = val.elemValue(mod, index) catch |err| switch (err) { - // Will be solved when arrays and vectors get migrated to the intern pool. - error.OutOfMemory => @panic("OOM"), - }; - elem_val.hash(elem_ty, hasher, mod); - } - }, - .Struct => { - switch (val.tag()) { - .aggregate => { - const field_values = val.castTag(.aggregate).?.data; - for (field_values, 0..) 
|field_val, i| { - const field_ty = ty.structFieldType(i, mod); - field_val.hash(field_ty, hasher, mod); - } - }, - else => unreachable, - } - }, - .Union => { - const union_obj = val.cast(Payload.Union).?.data; - if (ty.unionTagType(mod)) |tag_ty| { - union_obj.tag.hash(tag_ty, hasher, mod); - } - const active_field_ty = ty.unionFieldType(union_obj.tag, mod); - union_obj.val.hash(active_field_ty, hasher, mod); - }, - .Frame => { - @panic("TODO implement hashing frame values"); - }, - .AnyFrame => { - @panic("TODO implement hashing anyframe values"); - }, - } - } - /// This is a more conservative hash function that produces equal hashes for values /// that can coerce into each other. /// This function is used by hash maps and so treats floating-point NaNs as equal @@ -1820,35 +1730,6 @@ pub const Value = struct { } } - pub const ArrayHashContext = struct { - ty: Type, - mod: *Module, - - pub fn hash(self: @This(), val: Value) u32 { - const other_context: HashContext = .{ .ty = self.ty, .mod = self.mod }; - return @truncate(u32, other_context.hash(val)); - } - pub fn eql(self: @This(), a: Value, b: Value, b_index: usize) bool { - _ = b_index; - return a.eql(b, self.ty, self.mod); - } - }; - - pub const HashContext = struct { - ty: Type, - mod: *Module, - - pub fn hash(self: @This(), val: Value) u64 { - var hasher = std.hash.Wyhash.init(0); - val.hash(self.ty, &hasher, self.mod); - return hasher.final(); - } - - pub fn eql(self: @This(), a: Value, b: Value) bool { - return a.eql(b, self.ty, self.mod); - } - }; - pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { @@ -1919,14 +1800,25 @@ pub const Value = struct { } pub fn sliceLen(val: Value, mod: *Module) u64 { - return mod.intern_pool.sliceLen(val.toIntern()).toValue().toUnsignedInt(mod); + const ptr = mod.intern_pool.indexToKey(val.toIntern()).ptr; + return switch (ptr.len) { + .none => switch (mod.intern_pool.indexToKey(switch (ptr.addr) { + .decl => |decl| mod.declPtr(decl).ty.toIntern(), + .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).ty.toIntern(), + .comptime_field => |comptime_field| mod.intern_pool.typeOf(comptime_field), + else => unreachable, + })) { + .array_type => |array_type| array_type.len, + else => 1, + }, + else => ptr.len.toValue().toUnsignedInt(mod), + }; } /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. 
pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { return switch (val.ip_index) { - .undef => Value.undef, .none => switch (val.tag()) { .repeated => val.castTag(.repeated).?.data, .aggregate => val.castTag(.aggregate).?.data[index], @@ -1934,6 +1826,9 @@ pub const Value = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => |ty| (try mod.intern(.{ + .undef = ty.toType().elemType2(mod).toIntern(), + })).toValue(), .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), @@ -2492,7 +2387,7 @@ pub const Value = struct { } return OverflowArithmeticResult{ - .overflow_bit = boolToInt(overflowed), + .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)), .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), }; } @@ -2645,7 +2540,8 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (val.isUndef(mod)) return Value.undef; + if (val.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue(); + if (ty.toIntern() == .bool_type) return makeBool(!val.toBool()); const info = ty.intInfo(mod); @@ -2687,7 +2583,8 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue(); + if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool()); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -2725,7 +2622,8 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue(); + if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool())); const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty); @@ -2752,7 +2650,8 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue(); + if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool()); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -2789,7 +2688,8 @@ pub const Value = struct { /// operands must be integers; handles undefined. 
pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return (try mod.intern(.{ .undef = ty.toIntern() })).toValue(); + if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool()); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3233,7 +3133,7 @@ pub const Value = struct { result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); } return OverflowArithmeticResult{ - .overflow_bit = boolToInt(overflowed), + .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)), .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), }; } @@ -4267,12 +4167,6 @@ pub const Value = struct { return if (x) Value.true else Value.false; } - pub fn boolToInt(x: bool) Value { - const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; - const one: Value = .{ .ip_index = .one, .legacy = undefined }; - return if (x) one else zero; - } - pub const RuntimeIndex = InternPool.RuntimeIndex; /// This function is used in the debugger pretty formatters in tools/ to fetch the diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 62a0f3fefc91..0bba97dcaa1a 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -354,8 +354,8 @@ def Zir_Inst__Zir_Inst_Ref_SummaryProvider(value, _=None): def Air_Inst__Air_Inst_Ref_SummaryProvider(value, _=None): members = value.type.enum_members - # ignore .none - return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 1 - len(members)) + # ignore .var_args_param_type and .none + return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 2 - len(members)) class Module_Decl__Module_Decl_Index_SynthProvider: def __init__(self, value, _=None): self.value = value @@ -365,7 +365,7 @@ def update(self): mod = frame.FindVariable('mod') or frame.FindVariable('module') if mod: break else: return - self.ptr = mod.GetChildMemberWithName('allocated_decls').GetChildAtIndex(self.value.unsigned).Clone('decl') + self.ptr = mod.GetChildMemberWithName('allocated_decls').GetChildAtIndex(self.value.unsigned).address_of.Clone('decl') except: pass def has_children(self): return True def num_children(self): return 1 From d40b83de45db27c8c3e7a1f2ccf892563df43637 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 27 May 2023 22:03:12 -0400 Subject: [PATCH 118/205] behavior: pass more tests on llvm again --- src/InternPool.zig | 47 +++++-- src/Sema.zig | 237 ++++++++++++++++++++++-------------- src/codegen/llvm.zig | 25 ++-- src/type.zig | 4 +- src/value.zig | 12 +- test/behavior/bugs/6456.zig | 2 +- 6 files changed, 208 insertions(+), 119 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 7af91529c19c..88b057870757 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4250,6 +4250,8 @@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// * int <=> enum /// * enum_literal => enum /// * ptr <=> ptr +/// * opt ptr <=> ptr +/// * opt ptr <=> opt ptr /// * int => ptr /// * null_value => opt /// * payload => opt @@ -4258,9 +4260,6 @@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// * error set => error union /// * payload => error union /// * fn <=> fn -/// * array <=> array -/// * array <=> vector -/// * vector <=> vector pub fn 
getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; @@ -4270,6 +4269,15 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al return ip.get(gpa, .{ .opt = .{ .ty = new_ty, .val = .none, + } }) + else if (ip.isPointerType(new_ty)) + return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = .zero_usize }, + .len = switch (ip.indexToKey(new_ty).ptr_type.size) { + .One, .Many, .C => .none, + .Slice => try ip.get(gpa, .{ .undef = .usize_type }), + }, } }), else => switch (ip.indexToKey(val)) { .undef => return ip.get(gpa, .{ .undef = new_ty }), @@ -4320,6 +4328,18 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .addr = ptr.addr, .len = ptr.len, } }), + .opt => |opt| if (ip.isPointerType(new_ty)) + return switch (opt.val) { + .none => try ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = .zero_usize }, + .len = switch (ip.indexToKey(new_ty).ptr_type.size) { + .One, .Many, .C => .none, + .Slice => try ip.get(gpa, .{ .undef = .usize_type }), + }, + } }), + else => try ip.getCoerced(gpa, opt.val, new_ty), + }, .err => |err| if (ip.isErrorSetType(new_ty)) return ip.get(gpa, .{ .err = .{ .ty = new_ty, @@ -4335,14 +4355,6 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .ty = new_ty, .val = error_union.val, } }), - .aggregate => |aggregate| return ip.get(gpa, .{ .aggregate = .{ - .ty = new_ty, - .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[0..@intCast(usize, ip.aggregateTypeLen(new_ty))] }, - .elems => |elems| .{ .elems = elems[0..@intCast(usize, ip.aggregateTypeLen(new_ty))] }, - .repeated_elem => |elem| .{ .repeated_elem = elem }, - }, - } }), else => {}, }, } @@ -4364,8 +4376,10 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al else => {}, } if (std.debug.runtime_safety) { - std.debug.panic("val={any} new_ty={any}\n", .{ - ip.items.get(@enumToInt(val)), ip.items.get(@enumToInt(new_ty)), + std.debug.panic("InternPool.getCoerced of {s} not implemented from {s} to {s}", .{ + @tagName(ip.indexToKey(val)), + @tagName(ip.indexToKey(old_ty)), + @tagName(ip.indexToKey(new_ty)), }); } unreachable; @@ -4507,6 +4521,13 @@ pub fn isErrorUnionType(ip: InternPool, ty: Index) bool { return ip.indexToKey(ty) == .error_union_type; } +pub fn isAggregateType(ip: InternPool, ty: Index) bool { + return switch (ip.indexToKey(ty)) { + .array_type, .vector_type, .anon_struct_type, .struct_type => true, + else => false, + }; +} + /// This is only legal because the initializer is not part of the hash. pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { assert(ip.items.items(.tag)[@enumToInt(index)] == .variable); diff --git a/src/Sema.zig b/src/Sema.zig index 61061279e427..c1bcf53ab21f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6381,8 +6381,7 @@ fn zirCall( var input_is_error = false; const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); - const func_ty_info = mod.typeToFunc(func_ty).?; - const fn_params_len = func_ty_info.param_types.len; + const fn_params_len = mod.typeToFunc(func_ty).?.param_types.len; const parent_comptime = block.is_comptime; // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument. 
var extra_index: usize = 0; @@ -6391,6 +6390,7 @@ fn zirCall( extra_index += 1; arg_index += 1; }) { + const func_ty_info = mod.typeToFunc(func_ty).?; const arg_end = sema.code.extra[extra.end + extra_index]; defer arg_start = arg_end; @@ -6876,7 +6876,11 @@ fn analyzeCall( .args_count = @intCast(u32, func_ty_info.param_types.len), }; var delete_memoized_call_key = false; - defer if (delete_memoized_call_key) mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); + defer if (delete_memoized_call_key) { + assert(mod.memoized_call_args.items.len >= memoized_call_key.args_index and + mod.memoized_call_args.items.len < memoized_call_key.args_index + memoized_call_key.args_count); + mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); + }; if (is_comptime_call) { try mod.memoized_call_args.ensureUnusedCapacity(gpa, memoized_call_key.args_count); delete_memoized_call_key = true; @@ -6990,14 +6994,22 @@ fn analyzeCall( .{ .args = &mod.memoized_call_args }, ); if (gop.found_existing) { + assert(mod.memoized_call_args.items.len == memoized_call_key.args_index + memoized_call_key.args_count); + mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); + delete_memoized_call_key = false; + // We need to use the original memoized error set instead of fn_ret_ty. const result = gop.value_ptr.*; assert(result != .none); // recursive memoization? + break :res2 try sema.addConstant(mod.intern_pool.typeOf(result).toType(), result.toValue()); } gop.value_ptr.* = .none; - delete_memoized_call_key = false; + } else if (delete_memoized_call_key) { + assert(mod.memoized_call_args.items.len == memoized_call_key.args_index + memoized_call_key.args_count); + mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); } + delete_memoized_call_key = false; const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { @@ -14324,13 +14336,14 @@ fn zirOverflowArithmetic( const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs); const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty); + const overflow_ty = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type.types[1].toType(); var result: struct { inst: Air.Inst.Ref = .none, wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { - const zero = try mod.intValue(dest_ty.scalarType(mod), 0); + const zero_bit = try mod.intValue(Type.u1, 0); switch (zir_tag) { .add_with_overflow => { // If either of the arguments is zero, `false` is returned and the other is stored @@ -14338,12 +14351,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the argument is undefined, undefined is returned. 
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14364,7 +14377,7 @@ fn zirOverflowArithmetic( if (rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; @@ -14383,9 +14396,9 @@ fn zirOverflowArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } } } @@ -14393,9 +14406,9 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod)) { if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs }; } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } } @@ -14417,12 +14430,12 @@ fn zirOverflowArithmetic( // Oterhwise if either of the arguments is undefined, both results are undefined. 
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14922,13 +14935,18 @@ fn analyzeArithmetic( .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), else => unreachable, }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } @@ -14941,7 +14959,7 @@ fn analyzeArithmetic( const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -19219,10 +19237,9 @@ fn zirReify( try names.ensureUnusedCapacity(sema.arena, len); for (0..len) |i| { const elem_val = try payload_val.elemValue(mod, i); - const struct_val = elem_val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // error_set: type, - const name_val = struct_val[0]; + const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); + const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str); const gop = names.getOrPutAssumeCapacity(name_ip); @@ -19303,12 +19320,9 @@ fn zirReify( for (0..fields_len) |field_i| { const elem_val = try fields_val.elemValue(mod, field_i); - const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // name: []const u8 - const name_val = field_struct_val[0]; - // value: comptime_int - const value_val = field_struct_val[1]; + const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); + const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex("value").?); const field_name = try name_val.toAllocatedBytes( Type.slice_const_u8, @@ -19485,14 +19499,10 @@ fn zirReify( for (0..fields_len) |i| { const elem_val = try fields_val.elemValue(mod, i); - const field_struct_val = elem_val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // name: []const u8 - const name_val = field_struct_val[0]; - // type: type, - const type_val = field_struct_val[1]; - // 
alignment: comptime_int, - const alignment_val = field_struct_val[2]; + const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); + const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); + const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex("alignment").?); const field_name = try name_val.toAllocatedBytes( Type.slice_const_u8, @@ -19635,25 +19645,21 @@ fn zirReify( var noalias_bits: u32 = 0; for (param_types, 0..) |*param_type, i| { - const arg = try params_val.elemValue(mod, i); - const arg_val = arg.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // is_generic: bool, - const arg_is_generic = arg_val[0].toBool(); - // is_noalias: bool, - const arg_is_noalias = arg_val[1].toBool(); - // type: ?type, - const param_type_opt_val = arg_val[2]; + const elem_val = try params_val.elemValue(mod, i); + const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); + const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_generic").?); + const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_noalias").?); + const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); - if (arg_is_generic) { + if (param_is_generic_val.toBool()) { return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{}); } - const param_type_val = param_type_opt_val.optionalValue(mod) orelse + const param_type_val = opt_param_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); param_type.* = param_type_val.toIntern(); - if (arg_is_noalias) { + if (param_is_noalias_val.toBool()) { if (!param_type.toType().isPtrAtRuntime(mod)) { return sema.fail(block, src, "non-pointer parameter declared noalias", .{}); } @@ -19748,19 +19754,13 @@ fn reifyStruct( try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); var i: usize = 0; while (i < fields_len) : (i += 1) { - const elem_val = try fields_val.elemValue(sema.mod, i); - const field_struct_val = elem_val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // name: []const u8 - const name_val = field_struct_val[0]; - // type: type, - const type_val = field_struct_val[1]; - // default_value: ?*const anyopaque, - const default_value_val = field_struct_val[2]; - // is_comptime: bool, - const is_comptime_val = field_struct_val[3]; - // alignment: comptime_int, - const alignment_val = field_struct_val[4]; + const elem_val = try fields_val.elemValue(mod, i); + const elem_fields = mod.intern_pool.typeOf(elem_val.toIntern()).toType().structFields(mod); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); + const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); + const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex("default_value").?); + const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_comptime").?); + const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex("alignment").?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19806,18 +19806,16 @@ fn reifyStruct( return sema.fail(block, src, "duplicate struct field {s}", .{field_name}); } - const default_val = if 
(default_value_val.optionalValue(mod)) |opt_val| blk: { - const payload_val = if (opt_val.pointerDecl(mod)) |opt_decl| - mod.declPtr(opt_decl).val - else - opt_val; - break :blk try payload_val.copy(new_decl_arena_allocator); - } else Value.@"unreachable"; + const field_ty = type_val.toType(); + const default_val = if (default_value_val.optionalValue(mod)) |opt_val| + try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse + return sema.failWithNeededComptime(block, src, "struct field default value must be comptime-known") + else + Value.@"unreachable"; if (is_comptime_val.toBool() and default_val.toIntern() == .unreachable_value) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } - const field_ty = type_val.toType(); gop.value_ptr.* = .{ .ty = field_ty, .abi_align = abi_align, @@ -20386,17 +20384,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } - return sema.addConstant(aligned_dest_ty, try mod.getCoerced(switch (mod.intern_pool.indexToKey(operand_val.toIntern())) { - .undef, .ptr => operand_val, - .opt => |opt| switch (opt.val) { - .none => if (dest_ty.ptrAllowsZero(mod)) - Value.zero_usize - else - return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}), - else => opt.val.toValue(), - }, - else => unreachable, - }, aligned_dest_ty)); + return sema.addConstant(aligned_dest_ty, try mod.getCoerced(operand_val, aligned_dest_ty)); } try sema.requireRuntimeBlock(block, src, null); @@ -20569,7 +20557,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align }); } } - return sema.addConstant(dest_ty, val); + return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty)); } try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); @@ -20700,7 +20688,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { const elem_val = try val.elemValue(mod, i); - elem.* = try (try elem_val.byteSwap(operand_ty, mod, sema.arena)).intern(scalar_ty, mod); + elem.* = try (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod); } return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{ .ty = operand_ty.toIntern(), @@ -25128,12 +25116,18 @@ fn tupleFieldValByIndex( } if (try sema.resolveMaybeUndefVal(tuple_byval)) |tuple_val| { - if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return sema.addConstant(field_ty, opv); } - const field_values = tuple_val.castTag(.aggregate).?.data; - return sema.addConstant(field_ty, field_values[field_index]); + return switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) { + .undef => sema.addConstUndef(field_ty), + .aggregate => |aggregate| sema.addConstant(field_ty, switch (aggregate.storage) { + .bytes => |bytes| try mod.intValue(Type.u8, bytes[field_index]), + .elems => |elems| elems[field_index].toValue(), + .repeated_elem => |elem| elem.toValue(), + }), + else => unreachable, + }; } if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { @@ -25883,7 +25877,7 @@ fn coerceExtra( var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (maybe_inst_val) |val| { - return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty)); + return sema.coerceInMemory(block, val, inst_ty, dest_ty, dest_ty_src); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); @@ -26072,7 +26066,7 @@ fn coerceExtra( // coercion to C pointer .C => switch (inst_ty.zigTypeTag(mod)) { .Null => { - return sema.addConstant(dest_ty, Value.null); + return sema.addConstant(dest_ty, try mod.getCoerced(Value.null, dest_ty)); }, .ComptimeInt => { const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) { @@ -26548,6 +26542,68 @@ fn coerceExtra( return sema.failWithOwnedErrorMsg(msg); } +fn coerceInMemory( + sema: *Sema, + block: *Block, + val: Value, + src_ty: Type, + dst_ty: Type, + dst_ty_src: LazySrcLoc, +) CompileError!Air.Inst.Ref { + const mod = sema.mod; + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .aggregate => |aggregate| { + const dst_ty_key = mod.intern_pool.indexToKey(dst_ty.toIntern()); + const dest_len = try sema.usizeCast( + block, + dst_ty_src, + mod.intern_pool.aggregateTypeLen(dst_ty.toIntern()), + ); + direct: { + const src_ty_child = switch (mod.intern_pool.indexToKey(src_ty.toIntern())) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type, .struct_type => break :direct, + else => unreachable, + }; + const dst_ty_child = switch (dst_ty_key) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type, .struct_type => break :direct, + else => unreachable, + }; + if (src_ty_child != dst_ty_child) break :direct; + return try sema.addConstant(dst_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dst_ty.toIntern(), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[0..dest_len] }, + .elems => |elems| .{ .elems = elems[0..dest_len] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, + } })).toValue()); + } + const dest_elems = try sema.arena.alloc(InternPool.Index, dest_len); + for (dest_elems, 0..) 
|*dest_elem, i| { + const elem_ty = switch (dst_ty_key) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type => |anon_struct_type| anon_struct_type.types[i], + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).? + .fields.values()[i].ty.toIntern(), + else => unreachable, + }; + dest_elem.* = try mod.intern_pool.getCoerced(mod.gpa, switch (aggregate.storage) { + .bytes => |bytes| (try mod.intValue(Type.u8, bytes[i])).toIntern(), + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }, elem_ty); + } + return sema.addConstant(dst_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dst_ty.toIntern(), + .storage = .{ .elems = dest_elems }, + } })).toValue()); + }, + else => return sema.addConstant(dst_ty, try mod.getCoerced(val, dst_ty)), + } +} + const InMemoryCoercionResult = union(enum) { ok, no_match: Pair, @@ -28619,7 +28675,11 @@ fn coerceArrayPtrToSlice( const array_ty = ptr_array_ty.childType(mod); const slice_val = try mod.intern(.{ .ptr = .{ .ty = dest_ty.toIntern(), - .addr = mod.intern_pool.indexToKey(val.toIntern()).ptr.addr, + .addr = switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => .{ .int = try mod.intern(.{ .undef = .usize_type }) }, + .ptr => |ptr| ptr.addr, + else => unreachable, + }, .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(), } }); return sema.addConstant(dest_ty, slice_val.toValue()); @@ -28962,7 +29022,7 @@ fn coerceArrayLike( if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(inst)) |inst_val| { // These types share the same comptime value representation. - return sema.addConstant(dest_ty, try mod.getCoerced(inst_val, dest_ty)); + return sema.coerceInMemory(block, inst_val, inst_ty, dest_ty, dest_ty_src); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); @@ -33599,9 +33659,8 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { const mod = sema.mod; return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.size) { + .One, .Many, .C => ty, .Slice => null, - .C => ptr_type.elem_type.toType(), - .One, .Many => ty, }, .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) { .ptr_type => |ptr_type| switch (ptr_type.size) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 41db3e7a0485..5ef92c6e4667 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3396,7 +3396,7 @@ pub const DeclGen = struct { const llvm_ptr_val = switch (ptr.addr) { .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), - .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), + .int => |int| try dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int)), .eu_payload, .opt_payload, .elem, @@ -3796,11 +3796,20 @@ pub const DeclGen = struct { } } - fn lowerIntAsPtr(dg: *DeclGen, int: InternPool.Key.Int) *llvm.Value { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int.storage.toBigInt(&bigint_space); - const llvm_int = lowerBigInt(dg, Type.usize, bigint); - return llvm_int.constIntToPtr(dg.context.pointerType(0)); + fn lowerIntAsPtr(dg: *DeclGen, int_key: InternPool.Key) Error!*llvm.Value { + switch (int_key) { + .undef => { + const llvm_usize = try dg.lowerType(Type.usize); + return llvm_usize.getUndef(); + }, + .int => |int| { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int.storage.toBigInt(&bigint_space); + const llvm_int = 
lowerBigInt(dg, Type.usize, bigint); + return llvm_int.constIntToPtr(dg.context.pointerType(0)); + }, + else => unreachable, + } } fn lowerBigInt(dg: *DeclGen, ty: Type, bigint: std.math.big.int.Const) *llvm.Value { @@ -3848,11 +3857,11 @@ pub const DeclGen = struct { const mod = dg.module; const target = mod.getTarget(); return switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { - .int => |int| dg.lowerIntAsPtr(int), + .int => |int| dg.lowerIntAsPtr(.{ .int = int }), .ptr => |ptr| switch (ptr.addr) { .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), - .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), + .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int)), .eu_payload => |eu_ptr| { const parent_llvm_ptr = try dg.lowerParentPtr(eu_ptr.toValue(), true); diff --git a/src/type.zig b/src/type.zig index ebf331ef8893..0e30debf0a17 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2788,7 +2788,7 @@ pub const Type = struct { // Works for vectors and vectors of integers. pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value { - const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty); + const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod)); return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = scalar.toIntern() }, @@ -2817,7 +2817,7 @@ pub const Type = struct { // Works for vectors and vectors of integers. /// The returned Value will have type dest_ty. pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { - const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty); + const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty.scalarType(mod)); return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = scalar.toIntern() }, diff --git a/src/value.zig b/src/value.zig index dda95cbb441d..a9fb906c0b14 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2340,16 +2340,16 @@ pub const Value = struct { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); - of.* = try of_math_result.overflow_bit.intern(Type.bool, mod); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return OverflowArithmeticResult{ .overflow_bit = (try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, } })).toValue(), .wrapped_result = (try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })).toValue(), }; @@ -3090,16 +3090,16 @@ pub const Value = struct { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); - of.* = try of_math_result.overflow_bit.intern(Type.bool, mod); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return OverflowArithmeticResult{ .overflow_bit = (try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), + .ty = (try 
mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), .storage = .{ .elems = overflowed_data }, } })).toValue(), .wrapped_result = (try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .ty = ty.toIntern(), .storage = .{ .elems = result_data }, } })).toValue(), }; diff --git a/test/behavior/bugs/6456.zig b/test/behavior/bugs/6456.zig index 297c9c742380..31dea02cf67f 100644 --- a/test/behavior/bugs/6456.zig +++ b/test/behavior/bugs/6456.zig @@ -24,7 +24,7 @@ test "issue 6456" { .alignment = 0, .name = name, .type = usize, - .default_value = &@as(?usize, null), + .default_value = null, .is_comptime = false, }}; } From 3b6ca1d35b950d67fff5964f0063dadf01f30e2d Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 28 May 2023 02:41:22 -0400 Subject: [PATCH 119/205] Module: move memoized data to the intern pool This avoids memory management bugs with the previous implementation. --- src/InternPool.zig | 109 +++++++++++++++++++++++- src/Module.zig | 70 +++------------ src/Sema.zig | 173 +++++++++++++++++++++----------------- src/TypedValue.zig | 3 + src/arch/wasm/CodeGen.zig | 3 + src/codegen.zig | 3 + src/codegen/c.zig | 6 +- src/codegen/llvm.zig | 3 + src/codegen/spirv.zig | 3 + src/type.zig | 27 ++++++ src/value.zig | 4 + 11 files changed, 264 insertions(+), 140 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 88b057870757..7ff49c425998 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -217,6 +217,11 @@ pub const Key = union(enum) { /// An instance of a union. un: Union, + /// A declaration with a memoized value. + memoized_decl: MemoizedDecl, + /// A comptime function call with a memoized result. + memoized_call: Key.MemoizedCall, + pub const IntType = std.builtin.Type.Int; pub const ErrorUnionType = struct { @@ -609,6 +614,17 @@ pub const Key = union(enum) { }; }; + pub const MemoizedDecl = struct { + val: Index, + decl: Module.Decl.Index, + }; + + pub const MemoizedCall = struct { + func: Module.Fn.Index, + arg_values: []const Index, + result: Index, + }; + pub fn hash32(key: Key, ip: *const InternPool) u32 { return @truncate(u32, key.hash64(ip)); } @@ -786,6 +802,13 @@ pub const Key = union(enum) { std.hash.autoHash(hasher, func_type.is_generic); std.hash.autoHash(hasher, func_type.is_noinline); }, + + .memoized_decl => |memoized_decl| std.hash.autoHash(hasher, memoized_decl.val), + + .memoized_call => |memoized_call| { + std.hash.autoHash(hasher, memoized_call.func); + for (memoized_call.arg_values) |arg| std.hash.autoHash(hasher, arg); + }, } } @@ -1054,6 +1077,17 @@ pub const Key = union(enum) { a_info.is_generic == b_info.is_generic and a_info.is_noinline == b_info.is_noinline; }, + + .memoized_decl => |a_info| { + const b_info = b.memoized_decl; + return a_info.val == b_info.val; + }, + + .memoized_call => |a_info| { + const b_info = b.memoized_call; + return a_info.func == b_info.func and + std.mem.eql(Index, a_info.arg_values, b_info.arg_values); + }, } } @@ -1105,6 +1139,10 @@ pub const Key = union(enum) { .@"unreachable" => .noreturn_type, .generic_poison => .generic_poison_type, }, + + .memoized_decl, + .memoized_call, + => unreachable, }; } }; @@ -1380,6 +1418,14 @@ pub const Index = enum(u32) { bytes: struct { data: *Bytes }, aggregate: struct { data: *Aggregate }, repeated: struct { data: *Repeated }, + + memoized_decl: struct { data: *Key.MemoizedDecl }, + memoized_call: struct { + const @"data.args_len" = opaque {}; + data: *MemoizedCall, + 
@"trailing.arg_values.len": *@"data.args_len", + trailing: struct { arg_values: []Index }, + }, }) void { _ = self; const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields; @@ -1875,6 +1921,13 @@ pub const Tag = enum(u8) { /// An instance of an array or vector with every element being the same value. /// data is extra index to `Repeated`. repeated, + + /// A memoized declaration value. + /// data is extra index to `Key.MemoizedDecl` + memoized_decl, + /// A memoized comptime function call result. + /// data is extra index to `MemoizedFunc` + memoized_call, }; /// Trailing: @@ -2271,6 +2324,14 @@ pub const Float128 = struct { } }; +/// Trailing: +/// 0. arg value: Index for each args_len +pub const MemoizedCall = struct { + func: Module.Fn.Index, + args_len: u32, + result: Index, +}; + pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.items.len == 0); @@ -2758,6 +2819,16 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, + + .memoized_decl => .{ .memoized_decl = ip.extraData(Key.MemoizedDecl, data) }, + .memoized_call => { + const extra = ip.extraDataTrail(MemoizedCall, data); + return .{ .memoized_call = .{ + .func = extra.data.func, + .arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]), + .result = extra.data.result, + } }; + }, }; } @@ -3724,6 +3795,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = try ip.addExtra(gpa, un), }); }, + + .memoized_decl => |memoized_decl| { + assert(memoized_decl.val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .memoized_decl, + .data = try ip.addExtra(gpa, memoized_decl), + }); + }, + + .memoized_call => |memoized_call| { + for (memoized_call.arg_values) |arg| assert(arg != .none); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + + memoized_call.arg_values.len); + ip.items.appendAssumeCapacity(.{ + .tag = .memoized_call, + .data = ip.addExtraAssumeCapacity(MemoizedCall{ + .func = memoized_call.func, + .args_len = @intCast(u32, memoized_call.arg_values.len), + .result = memoized_call.result, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, memoized_call.arg_values)); + }, } return @intToEnum(Index, ip.items.len - 1); } @@ -3788,7 +3882,7 @@ pub fn getIncompleteEnum( ip: *InternPool, gpa: Allocator, enum_type: Key.IncompleteEnumType, -) Allocator.Error!InternPool.IncompleteEnumType { +) Allocator.Error!IncompleteEnumType { switch (enum_type.tag_mode) { .auto => return getIncompleteEnumAuto(ip, gpa, enum_type), .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit), @@ -3800,7 +3894,7 @@ pub fn getIncompleteEnumAuto( ip: *InternPool, gpa: Allocator, enum_type: Key.IncompleteEnumType, -) Allocator.Error!InternPool.IncompleteEnumType { +) Allocator.Error!IncompleteEnumType { // Although the integer tag type will not be stored in the `EnumAuto` struct, // `InternPool` logic depends on it being present so that `typeOf` can be infallible. // Ensure it is present here: @@ -3849,7 +3943,7 @@ fn getIncompleteEnumExplicit( gpa: Allocator, enum_type: Key.IncompleteEnumType, tag: Tag, -) Allocator.Error!InternPool.IncompleteEnumType { +) Allocator.Error!IncompleteEnumType { // We must keep the map in sync with `items`. 
The hash and equality functions // for enum types only look at the decl field, which is present even in // an `IncompleteEnumType`. @@ -4704,6 +4798,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), .only_possible_value => 0, .union_value => @sizeOf(Key.Union), + + .memoized_decl => @sizeOf(Key.MemoizedDecl), + .memoized_call => b: { + const info = ip.extraData(MemoizedCall, data); + break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); + }, }); } const SortContext = struct { @@ -5215,6 +5315,9 @@ pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std .bytes, .aggregate, .repeated, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, .none => unreachable, // special tag diff --git a/src/Module.zig b/src/Module.zig index 314e636bab52..fe9c59583abd 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -88,18 +88,10 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, -/// This is currently only used for string literals, however the end-game once the lang spec -/// is settled will be to make this behavior consistent across all types. -memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, - /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. monomorphed_funcs: MonomorphedFuncsSet = .{}, -/// The set of all comptime function calls that have been cached so that future calls -/// with the same parameters will get the same return value. -memoized_calls: MemoizedCallSet = .{}, -memoized_call_args: MemoizedCall.Args = .{}, /// Contains the values from `@setAlignStack`. A sparse table is used here /// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while /// functions are many. 
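With these maps deleted, a memoized comptime call becomes just another interned key: the pool's hash-consing deduplicates entries by `func` and `arg_values` alone, and the `result` field rides along outside hash and equality (as the Key code above shows). A minimal sketch of the resulting lookup, using only the `getIfExists`/`indexToKey` API that appears later in this patch; the wrapper function itself is illustrative, not part of the patch:

    // Illustrative only: find a previously memoized comptime call.
    // Because `result` does not participate in hash/eql, passing `.none`
    // still matches the stored entry.
    fn lookupMemoizedCall(
        ip: *InternPool,
        func: Module.Fn.Index,
        arg_values: []const InternPool.Index,
    ) ?InternPool.Index {
        const index = ip.getIfExists(.{ .memoized_call = .{
            .func = func,
            .arg_values = arg_values,
            .result = .none,
        } }) orelse return null;
        return ip.indexToKey(index).memoized_call.result;
    }
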
@@ -223,42 +215,6 @@ const MonomorphedFuncsContext = struct { } }; -pub const MemoizedCallSet = std.HashMapUnmanaged( - MemoizedCall.Key, - MemoizedCall.Result, - MemoizedCall, - std.hash_map.default_max_load_percentage, -); - -pub const MemoizedCall = struct { - args: *const Args, - - pub const Args = std.ArrayListUnmanaged(InternPool.Index); - - pub const Key = struct { - func: Fn.Index, - args_index: u32, - args_count: u32, - - pub fn args(key: Key, ctx: MemoizedCall) []InternPool.Index { - return ctx.args.items[key.args_index..][0..key.args_count]; - } - }; - - pub const Result = InternPool.Index; - - pub fn eql(ctx: MemoizedCall, a: Key, b: Key) bool { - return a.func == b.func and mem.eql(InternPool.Index, a.args(ctx), b.args(ctx)); - } - - pub fn hash(ctx: MemoizedCall, key: Key) u64 { - var hasher = std.hash.Wyhash.init(0); - std.hash.autoHash(&hasher, key.func); - std.hash.autoHashStrat(&hasher, key.args(ctx), .Deep); - return hasher.final(); - } -}; - pub const SetAlignStack = struct { alignment: u32, /// TODO: This needs to store a non-lazy source location for the case of an inline function @@ -605,7 +561,6 @@ pub const Decl = struct { } mod.destroyFunc(func); } - _ = mod.memoized_decls.remove(decl.val.ip_index); if (decl.value_arena) |value_arena| { value_arena.deinit(gpa); decl.value_arena = null; @@ -3314,8 +3269,6 @@ pub fn deinit(mod: *Module) void { mod.test_functions.deinit(gpa); mod.align_stack_fns.deinit(gpa); mod.monomorphed_funcs.deinit(gpa); - mod.memoized_call_args.deinit(gpa); - mod.memoized_calls.deinit(gpa); mod.decls_free_list.deinit(gpa); mod.allocated_decls.deinit(gpa); @@ -3325,8 +3278,6 @@ pub fn deinit(mod: *Module) void { mod.namespaces_free_list.deinit(gpa); mod.allocated_namespaces.deinit(gpa); - mod.memoized_decls.deinit(gpa); - mod.intern_pool.deinit(gpa); } @@ -5438,6 +5389,17 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { mod.destroyDecl(decl_index); } +/// Finalize the creation of an anon decl. +pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { + // The Decl starts off with alive=false and the codegen backend will set alive=true + // if the Decl is referenced by an instruction or another constant. Otherwise, + // the Decl will be garbage collected by the `codegen_decl` task instead of sent + // to the linker. + if (mod.declPtr(decl_index).ty.isFnOrHasRuntimeBits(mod)) { + try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = decl_index }); + } +} + /// Delete all the Export objects that are caused by this Decl. Re-analysis of /// this Decl will cause them to be re-created (or not). fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { @@ -5875,7 +5837,7 @@ pub fn initNewAnonDecl( namespace: Namespace.Index, typed_value: TypedValue, name: [:0]u8, -) !void { +) Allocator.Error!void { assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern())); errdefer mod.gpa.free(name); @@ -5892,14 +5854,6 @@ pub fn initNewAnonDecl( new_decl.generation = mod.generation; try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); - - // The Decl starts off with alive=false and the codegen backend will set alive=true - // if the Decl is referenced by an instruction or another constant. Otherwise, - // the Decl will be garbage collected by the `codegen_decl` task instead of sent - // to the linker. 
- if (typed_value.ty.isFnOrHasRuntimeBits(mod)) { - try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); - } } pub fn errNoteNonLazy( diff --git a/src/Sema.zig b/src/Sema.zig index c1bcf53ab21f..7562794d25fd 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -734,6 +734,7 @@ pub const Block = struct { errdefer sema.mod.abortAnonDecl(new_decl_index); try new_decl.finalizeNewArena(&wad.new_decl_arena); wad.finished = true; + try sema.mod.finalizeAnonDecl(new_decl_index); return new_decl_index; } }; @@ -2292,7 +2293,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { defer reference_stack.deinit(); // Avoid infinite loops. - var seen = std.AutoHashMap(Module.Decl.Index, void).init(gpa); + var seen = std.AutoHashMap(Decl.Index, void).init(gpa); defer seen.deinit(); var cur_reference_trace: u32 = 0; @@ -2742,7 +2743,9 @@ fn zirStructDecl( try sema.analyzeStructDecl(new_decl, inst, struct_index); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn createAnonymousDeclTypeNamed( @@ -2941,6 +2944,7 @@ fn zirEnumDecl( new_namespace.ty = incomplete_enum.index.toType(); const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); done = true; const int_tag_ty = ty: { @@ -3193,7 +3197,9 @@ fn zirUnionDecl( _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirOpaqueDecl( @@ -3257,7 +3263,9 @@ fn zirOpaqueDecl( extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirErrorSetDecl( @@ -3298,7 +3306,9 @@ fn zirErrorSetDecl( new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { @@ -5133,32 +5143,35 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins return sema.addStrLit(block, bytes); } -fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air.Inst.Ref { - // `zir_bytes` references memory inside the ZIR module, which can get deallocated - // after semantic analysis is complete, for example in the case of the initialization - // expression of a variable declaration. 
+fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref { const mod = sema.mod; - const gpa = sema.gpa; - const ty = try mod.arrayType(.{ - .len = zir_bytes.len, - .child = .u8_type, - .sentinel = .zero_u8, - }); - const val = try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .bytes = zir_bytes }, - } }); - const gop = try mod.memoized_decls.getOrPut(gpa, val); - if (!gop.found_existing) { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); + const memoized_decl_index = memoized: { + const ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = bytes }, + } }); - const decl_index = try anon_decl.finish(ty, val.toValue(), 0); + _ = try sema.typeHasRuntimeBits(ty); + const new_decl_index = try mod.createAnonymousDecl(block, .{ .ty = ty, .val = val.toValue() }); + errdefer mod.abortAnonDecl(new_decl_index); - gop.key_ptr.* = val; - gop.value_ptr.* = decl_index; - } - return sema.analyzeDeclRef(gop.value_ptr.*); + const memoized_index = try mod.intern(.{ .memoized_decl = .{ + .val = val, + .decl = new_decl_index, + } }); + const memoized_decl_index = mod.intern_pool.indexToKey(memoized_index).memoized_decl.decl; + if (memoized_decl_index != new_decl_index) + mod.abortAnonDecl(new_decl_index) + else + try mod.finalizeAnonDecl(new_decl_index); + break :memoized memoized_decl_index; + }; + return sema.analyzeDeclRef(memoized_decl_index); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6868,30 +6881,15 @@ fn analyzeCall( defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); - // If it's a comptime function call, we need to memoize it as long as no external - // comptime memory is mutated. - var memoized_call_key = Module.MemoizedCall.Key{ - .func = module_fn_index, - .args_index = @intCast(u32, mod.memoized_call_args.items.len), - .args_count = @intCast(u32, func_ty_info.param_types.len), - }; - var delete_memoized_call_key = false; - defer if (delete_memoized_call_key) { - assert(mod.memoized_call_args.items.len >= memoized_call_key.args_index and - mod.memoized_call_args.items.len < memoized_call_key.args_index + memoized_call_key.args_count); - mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); - }; - if (is_comptime_call) { - try mod.memoized_call_args.ensureUnusedCapacity(gpa, memoized_call_key.args_count); - delete_memoized_call_key = true; - } - try sema.emitBackwardBranch(block, call_src); - // Whether this call should be memoized, set to false if the call can mutate - // comptime state. + // Whether this call should be memoized, set to false if the call can mutate comptime state. var should_memoize = true; + // If it's a comptime function call, we need to memoize it as long as no external + // comptime memory is mutated. 
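+    // Comptime-known argument values are collected into this buffer as the
+    // arguments are analyzed; together with the callee they form the key of a
+    // `memoized_call` InternPool entry.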
+ const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); + var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?; new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len); new_fn_info.comptime_bits = 0; @@ -6918,6 +6916,7 @@ fn analyzeCall( uncasted_args, is_comptime_call, &should_memoize, + memoized_arg_values, mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, @@ -6935,6 +6934,7 @@ fn analyzeCall( uncasted_args, is_comptime_call, &should_memoize, + memoized_arg_values, mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, @@ -6988,28 +6988,18 @@ fn analyzeCall( // bug generating invalid LLVM IR. const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { - const gop = try mod.memoized_calls.getOrPutContext( - gpa, - memoized_call_key, - .{ .args = &mod.memoized_call_args }, - ); - if (gop.found_existing) { - assert(mod.memoized_call_args.items.len == memoized_call_key.args_index + memoized_call_key.args_count); - mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); - delete_memoized_call_key = false; - - // We need to use the original memoized error set instead of fn_ret_ty. - const result = gop.value_ptr.*; - assert(result != .none); // recursive memoization? - - break :res2 try sema.addConstant(mod.intern_pool.typeOf(result).toType(), result.toValue()); + if (mod.intern_pool.getIfExists(.{ .memoized_call = .{ + .func = module_fn_index, + .arg_values = memoized_arg_values, + .result = .none, + } })) |memoized_call_index| { + const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call; + break :res2 try sema.addConstant( + mod.intern_pool.typeOf(memoized_call.result).toType(), + memoized_call.result.toValue(), + ); } - gop.value_ptr.* = .none; - } else if (delete_memoized_call_key) { - assert(mod.memoized_call_args.items.len == memoized_call_key.args_index + memoized_call_key.args_count); - mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); } - delete_memoized_call_key = false; const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { @@ -7067,10 +7057,14 @@ fn analyzeCall( if (should_memoize and is_comptime_call) { const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, ""); - mod.memoized_calls.getPtrContext( - memoized_call_key, - .{ .args = &mod.memoized_call_args }, - ).?.* = try result_val.intern(fn_ret_ty, mod); + + // TODO: check whether any external comptime memory was mutated by the + // comptime function call. If so, then do not memoize the call here. 
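+            // Interning the (func, arg_values, result) tuple publishes the result;
+            // later identical calls find it via `getIfExists` above and skip
+            // re-evaluating the function body.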
+ _ = try mod.intern(.{ .memoized_call = .{ + .func = module_fn_index, + .arg_values = memoized_arg_values, + .result = try result_val.intern(fn_ret_ty, mod), + } }); } break :res2 result; @@ -7216,6 +7210,7 @@ fn analyzeInlineCallArg( uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, should_memoize: *bool, + memoized_arg_values: []InternPool.Index, raw_param_types: []const InternPool.Index, func_inst: Air.Inst.Ref, has_comptime_args: *bool, @@ -7279,7 +7274,7 @@ fn analyzeInlineCallArg( }, } should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod); - mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(param_ty.toType(), mod)); + memoized_arg_values[arg_i.*] = try arg_val.intern(param_ty.toType(), mod); } else { sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg); } @@ -7315,7 +7310,7 @@ fn analyzeInlineCallArg( }, } should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod); - mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(sema.typeOf(uncasted_arg), mod)); + memoized_arg_values[arg_i.*] = try arg_val.intern(sema.typeOf(uncasted_arg), mod); } else { if (zir_tags[inst] == .param_anytype_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); @@ -19363,7 +19358,9 @@ fn zirReify( } } - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; }, .Opaque => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); @@ -19407,7 +19404,9 @@ fn zirReify( new_namespace.ty = opaque_ty.toType(); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; }, .Union => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); @@ -19604,7 +19603,9 @@ fn zirReify( } try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; }, .Fn => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); @@ -19902,7 +19903,9 @@ fn reifyStruct( } try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -31865,6 +31868,9 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -32997,6 +33003,8 @@ fn generateUnionTagTypeNumbered( .ty = Type.type, .val = undefined, }, name); + errdefer mod.abortAnonDecl(new_decl_index); + const new_decl = mod.declPtr(new_decl_index); new_decl.name_fully_qualified = true; new_decl.owns_tv = true; @@ -33016,6 +33024,7 @@ fn generateUnionTagTypeNumbered( new_decl.val = enum_ty.toValue(); + try mod.finalizeAnonDecl(new_decl_index); return enum_ty.toType(); } @@ -33049,6 +33058,7 @@ fn generateUnionTagTypeSimple( mod.declPtr(new_decl_index).name_fully_qualified = true; break :new_decl_index new_decl_index; }; + 
errdefer mod.abortAnonDecl(new_decl_index); const enum_ty = try mod.intern(.{ .enum_type = .{ .decl = new_decl_index, @@ -33066,6 +33076,7 @@ fn generateUnionTagTypeSimple( new_decl.owns_tv = true; new_decl.val = enum_ty.toValue(); + try mod.finalizeAnonDecl(new_decl_index); return enum_ty.toType(); } @@ -33358,6 +33369,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -33843,6 +33857,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 8770917a0123..f252e8479131 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -278,6 +278,9 @@ pub fn print( } else try writer.writeAll("..."); return writer.writeAll(" }"); }, + .memoized_decl, + .memoized_call, + => unreachable, }, }; } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 91743e0d643f..d2d54a69c5df 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3254,6 +3254,9 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => unreachable, }, .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .memoized_decl, + .memoized_call, + => unreachable, } } diff --git a/src/codegen.zig b/src/codegen.zig index 30ad8ab6e846..d6520ae7e533 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -605,6 +605,9 @@ pub fn generateSymbol( } } }, + .memoized_decl, + .memoized_call, + => unreachable, } return .ok; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a8e207765272..e6ce72f48e5b 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1090,6 +1090,7 @@ pub const DeclGen = struct { }; switch (mod.intern_pool.indexToKey(val.ip_index)) { + // types, not values .int_type, .ptr_type, .array_type, @@ -1106,7 +1107,10 @@ pub const DeclGen = struct { .func_type, .error_set_type, .inferred_error_set_type, - => unreachable, // types, not values + // memoization, not values + .memoized_decl, + .memoized_call, + => unreachable, .undef, .runtime_value => unreachable, // handled above .simple_value => |simple_value| switch (simple_value) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5ef92c6e4667..398a4124ccb5 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3793,6 +3793,9 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields_len); } }, + .memoized_decl, + .memoized_call, + => unreachable, } } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 94ea8b7f89ed..79c968232500 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -830,6 +830,9 @@ pub const DeclGen = struct { try self.addUndef(layout.padding); }, + .memoized_decl, + .memoized_call, + => unreachable, } } }; diff --git a/src/type.zig b/src/type.zig index 0e30debf0a17..bc2ce6fc7e74 100644 --- a/src/type.zig +++ b/src/type.zig @@ -400,6 +400,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, } } @@ -613,6 +616,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -719,6 +725,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }; 
} @@ -1050,6 +1059,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, } @@ -1464,6 +1476,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, } @@ -1695,6 +1710,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, } } @@ -2250,6 +2268,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -2586,6 +2607,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -2728,6 +2752,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; diff --git a/src/value.zig b/src/value.zig index a9fb906c0b14..a63b83daae5a 100644 --- a/src/value.zig +++ b/src/value.zig @@ -476,6 +476,10 @@ pub const Value = struct { .tag = un.tag.toValue(), .val = un.val.toValue(), }), + + .memoized_decl, + .memoized_call, + => unreachable, }; } From 3064d2aa7b9a8ea836cb70884b0640fe902ecc29 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 28 May 2023 10:33:59 -0400 Subject: [PATCH 120/205] behavior: additional llvm fixes --- src/InternPool.zig | 35 +++++---- src/Module.zig | 6 +- src/Sema.zig | 133 ++++++++++++++++++----------------- src/TypedValue.zig | 12 ++-- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 2 +- src/codegen.zig | 2 +- src/codegen/llvm.zig | 8 +-- src/codegen/spirv.zig | 6 +- src/type.zig | 54 +++++++------- src/value.zig | 33 +++++---- 13 files changed, 160 insertions(+), 137 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 7ff49c425998..55ab58c391fa 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3131,7 +3131,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .enum_type => |enum_type| { - assert(enum_type.tag_ty != .none); + assert(enum_type.tag_ty == .noreturn_type or ip.isIntegerType(enum_type.tag_ty)); + for (enum_type.values) |value| assert(ip.typeOf(value) == enum_type.tag_ty); assert(enum_type.names_map == .none); assert(enum_type.values_map == .none); @@ -3622,14 +3623,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (bytes.len != len) { assert(bytes.len == len_including_sentinel); assert(bytes[len] == ip.indexToKey(sentinel).int.storage.u64); - unreachable; } }, .elems => |elems| { if (elems.len != len) { assert(elems.len == len_including_sentinel); assert(elems[len] == sentinel); - unreachable; } }, .repeated_elem => |elem| { @@ -3832,7 +3831,7 @@ pub const IncompleteEnumType = struct { values_start: u32, pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void { - assert(tag_ty != .none); + assert(tag_ty == .noreturn_type or ip.isIntegerType(tag_ty)); ip.extra.items[self.tag_ty_index] = @enumToInt(tag_ty); } @@ -3863,6 +3862,7 @@ pub const IncompleteEnumType = struct { gpa: Allocator, value: Index, ) Allocator.Error!?u32 { + assert(ip.typeOf(value) == @intToEnum(Index, ip.extra.items[self.tag_ty_index])); const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)]; const field_index = map.count(); const indexes = ip.extra.items[self.values_start..][0..field_index]; @@ -4346,7 +4346,7 
@@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// * ptr <=> ptr /// * opt ptr <=> ptr /// * opt ptr <=> opt ptr -/// * int => ptr +/// * int <=> ptr /// * null_value => opt /// * payload => opt /// * error set <=> error set @@ -4386,18 +4386,18 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .ty = new_ty, .index = func.index, } }), - .int => |int| if (ip.isIntegerType(new_ty)) - return getCoercedInts(ip, gpa, int, new_ty) - else if (ip.isEnumType(new_ty)) - return ip.get(gpa, .{ .enum_tag = .{ + .int => |int| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{ .ty = new_ty, - .int = val, - } }) - else if (ip.isPointerType(new_ty)) - return ip.get(gpa, .{ .ptr = .{ + .int = try ip.getCoerced(gpa, val, enum_type.tag_ty), + } }), + .ptr_type => return ip.get(gpa, .{ .ptr = .{ .ty = new_ty, - .addr = .{ .int = val }, + .addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) }, } }), + else => if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, int, new_ty), + }, .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { @@ -4421,7 +4421,12 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .ty = new_ty, .addr = ptr.addr, .len = ptr.len, - } }), + } }) + else if (ip.isIntegerType(new_ty)) + switch (ptr.addr) { + .int => |int| return ip.getCoerced(gpa, int, new_ty), + else => {}, + }, .opt => |opt| if (ip.isPointerType(new_ty)) return switch (opt.val) { .none => try ip.get(gpa, .{ .ptr = .{ diff --git a/src/Module.zig b/src/Module.zig index fe9c59583abd..d11a11cf0835 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -707,6 +707,10 @@ pub const Decl = struct { return TypedValue{ .ty = decl.ty, .val = decl.val }; } + pub fn internValue(decl: Decl, mod: *Module) Allocator.Error!InternPool.Index { + return decl.val.intern(decl.ty, mod); + } + pub fn isFunction(decl: Decl, mod: *const Module) !bool { const tv = try decl.typedValue(); return tv.ty.zigTypeTag(mod) == .Fn; @@ -7073,7 +7077,7 @@ pub fn atomicPtrAlignment( const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, - .Enum => try ty.intTagType(mod), + .Enum => ty.intTagType(mod), .Float => { const bit_count = ty.floatBits(target); if (bit_count > max_atomic_bits) { diff --git a/src/Sema.zig b/src/Sema.zig index 7562794d25fd..7ad7b1a8a305 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3291,7 +3291,8 @@ fn zirErrorSetDecl( while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const str_index = sema.code.extra[extra_index]; const name = sema.code.nullTerminatedString(str_index); - const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + const kv = try mod.getErrorValue(name); + const name_ip = try mod.intern_pool.getOrPutString(gpa, kv.key); const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen } @@ -6409,7 +6410,7 @@ fn zirCall( // Generate args to comptime params in comptime block. 
defer block.is_comptime = parent_comptime; - if (arg_index < fn_params_len and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { + if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { block.is_comptime = true; // TODO set comptime_reason } @@ -7077,14 +7078,13 @@ fn analyzeCall( assert(!func_ty_info.is_generic); const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len); - const fn_info = mod.typeToFunc(func_ty).?; for (uncasted_args, 0..) |uncasted_arg, i| { if (i < fn_params_len) { const opts: CoerceOpts = .{ .param_src = .{ .func_inst = func, .param_i = @intCast(u32, i), } }; - const param_ty = fn_info.param_types[i].toType(); + const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType(); args[i] = sema.analyzeCallArg( block, .unneeded, @@ -8267,7 +8267,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; const enum_tag_ty = sema.typeOf(enum_tag); - const int_tag_ty = try enum_tag_ty.intTagType(mod); + const int_tag_ty = enum_tag_ty.intTagType(mod); if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { return sema.addConstant(int_tag_ty, try mod.getCoerced(opv, int_tag_ty)); @@ -8299,12 +8299,9 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (try sema.resolveMaybeUndefVal(operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum(mod)) { - const int_tag_ty = try dest_ty.intTagType(mod); + const int_tag_ty = dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { - return sema.addConstant(dest_ty, (try mod.intern(.{ .enum_tag = .{ - .ty = dest_ty.toIntern(), - .int = int_val.toIntern(), - } })).toValue()); + return sema.addConstant(dest_ty, try mod.getCoerced(int_val, dest_ty)); } const msg = msg: { const msg = try sema.errMsg( @@ -8336,7 +8333,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; return sema.failWithOwnedErrorMsg(msg); } - return sema.addConstant(dest_ty, int_val); + return sema.addConstant(dest_ty, try mod.getCoerced(int_val, dest_ty)); } if (try sema.typeHasOnePossibleValue(dest_ty)) |opv| { @@ -9513,7 +9510,7 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { - return sema.addConstant(Type.usize, ptr_val); + return sema.addConstant(Type.usize, try mod.getCoerced(ptr_val, Type.usize)); } try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); return block.addUnOp(.ptrtoint, ptr); @@ -9651,7 +9648,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty); + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty); const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = try sema.addConstant(operand_ty, dest_max_val); const diff = try block.addBinOp(.subwrap, dest_max, operand); @@ -12848,7 +12845,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (res_sent_val) |sent_val| { const elem_index = try sema.addIntUnsigned(Type.usize, result_len); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); - const init = try sema.addConstant(lhs_info.elem_type, sent_val); + const init = try sema.addConstant(lhs_info.elem_type, try 
mod.getCoerced(sent_val, lhs_info.elem_type)); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); } @@ -19236,7 +19233,8 @@ fn zirReify( const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); - const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str); + const kv = try mod.getErrorValue(name_str); + const name_ip = try mod.intern_pool.getOrPutString(gpa, kv.key); const gop = names.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { return sema.fail(block, src, "duplicate error '{s}'", .{name_str}); @@ -19346,7 +19344,7 @@ fn zirReify( return sema.failWithOwnedErrorMsg(msg); } - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, value_val.toIntern())) |other| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| { const msg = msg: { const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)}); errdefer msg.destroy(gpa); @@ -20263,7 +20261,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } } - return sema.addConstant(dest_ty, val); + return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty)); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -20421,7 +20419,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData const dest_ty = try Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(dest_ty, operand_val); + return sema.addConstant(dest_ty, try mod.getCoerced(operand_val, dest_ty)); } try sema.requireRuntimeBlock(block, src, null); @@ -20624,7 +20622,7 @@ fn zirBitCount( for (elems, 0..) 
|*elem, i| { const elem_val = try val.elemValue(mod, i); const count = comptimeOp(elem_val, scalar_ty, mod); - elem.* = (try mod.intValue(scalar_ty, count)).toIntern(); + elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern(); } return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), @@ -22385,7 +22383,9 @@ fn analyzeMinMax( if (std.debug.runtime_safety) { assert(try sema.intFitsInType(val, refined_ty, null)); } - cur_minmax = try sema.addConstant(refined_ty, try mod.getCoerced(val, refined_ty)); + cur_minmax = try sema.addConstant(refined_ty, (try sema.resolveMaybeUndefVal( + try sema.coerceInMemory(block, val, orig_ty, refined_ty, src), + )).?); } break :refined refined_ty; @@ -23684,7 +23684,7 @@ fn validateExternType( return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod)); }, .Enum => { - return sema.validateExternType(try ty.intTagType(mod), position); + return sema.validateExternType(ty.intTagType(mod), position); }, .Struct, .Union => switch (ty.containerLayout(mod)) { .Extern => return true, @@ -23762,7 +23762,7 @@ fn explainWhyTypeIsNotExtern( } }, .Enum => { - const tag_ty = try ty.intTagType(mod); + const tag_ty = ty.intTagType(mod); try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, @@ -25412,7 +25412,8 @@ fn elemVal( const elem_ptr_ty = try sema.elemPtrType(indexable_ty, index); const elem_ptr_val = try indexable_val.elemPtr(elem_ptr_ty, index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { - return sema.addConstant(indexable_ty.elemType2(mod), elem_val); + const result_ty = indexable_ty.elemType2(mod); + return sema.addConstant(result_ty, try mod.getCoerced(elem_val, result_ty)); } break :rs indexable_src; }; @@ -26603,6 +26604,10 @@ fn coerceInMemory( .storage = .{ .elems = dest_elems }, } })).toValue()); }, + .float => |float| return sema.addConstant(dst_ty, (try mod.intern(.{ .float = .{ + .ty = dst_ty.toIntern(), + .storage = float.storage, + } })).toValue()), else => return sema.addConstant(dst_ty, try mod.getCoerced(val, dst_ty)), } } @@ -26983,8 +26988,11 @@ fn coerceInMemoryAllowed( if (dest_ty.eql(src_ty, mod)) return .ok; + const dest_tag = dest_ty.zigTypeTag(mod); + const src_tag = src_ty.zigTypeTag(mod); + // Differently-named integers with the same number of bits. - if (dest_ty.zigTypeTag(mod) == .Int and src_ty.zigTypeTag(mod) == .Int) { + if (dest_tag == .Int and src_tag == .Int) { const dest_info = dest_ty.intInfo(mod); const src_info = src_ty.intInfo(mod); @@ -27009,7 +27017,7 @@ fn coerceInMemoryAllowed( } // Differently-named floats with the same number of bits. 
- if (dest_ty.zigTypeTag(mod) == .Float and src_ty.zigTypeTag(mod) == .Float) { + if (dest_tag == .Float and src_tag == .Float) { const dest_bits = dest_ty.floatBits(target); const src_bits = src_ty.floatBits(target); if (dest_bits == src_bits) { @@ -27031,9 +27039,6 @@ fn coerceInMemoryAllowed( return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } - const dest_tag = dest_ty.zigTypeTag(mod); - const src_tag = src_ty.zigTypeTag(mod); - // Functions if (dest_tag == .Fn and src_tag == .Fn) { return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src); @@ -27808,7 +27813,7 @@ fn beginComptimePtrMutation( .comptime_field => |comptime_field| { const duped = try sema.arena.create(Value); duped.* = comptime_field.toValue(); - return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(ptr_val.toIntern()).toType(), duped, ptr_elem_ty, .{ + return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(comptime_field).toType(), duped, ptr_elem_ty, .{ .decl = undefined, .runtime_index = .comptime_field_ptr, }); @@ -27864,7 +27869,21 @@ fn beginComptimePtrMutation( .direct => |val_ptr| { const payload_ty = parent.ty.optionalChild(mod); switch (val_ptr.ip_index) { - .undef, .null_value => { + .none => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, + else => { + const payload_val = switch (mod.intern_pool.indexToKey(val_ptr.ip_index)) { + .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .opt => |opt| switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + else => opt.val, + }, + else => unreachable, + }; + // An optional has been initialized to undefined at comptime and now we // are for the first time setting the payload. We must change the // representation of the optional from `undef` to `opt_payload`. 
@@ -27874,7 +27893,7 @@ fn beginComptimePtrMutation( const payload = try arena.create(Value.Payload.SubValue); payload.* = .{ .base = .{ .tag = .opt_payload }, - .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), + .data = payload_val.toValue(), }; val_ptr.* = Value.initPayload(&payload.base); @@ -27885,24 +27904,6 @@ fn beginComptimePtrMutation( .ty = payload_ty, }; }, - .none => switch (val_ptr.tag()) { - .opt_payload => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, - - else => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - }, - else => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, } }, .bad_decl_ty, .bad_ptr_ty => return parent, @@ -33339,16 +33340,20 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; }, - .auto, .explicit => switch (enum_type.names.len) { - 0 => return Value.@"unreachable", - 1 => return try mod.getCoerced((if (enum_type.values.len == 0) - try mod.intern(.{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = 0 }, - } }) - else - enum_type.values[0]).toValue(), ty), - else => return null, + .auto, .explicit => { + if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; + + switch (enum_type.names.len) { + 0 => return Value.@"unreachable", + 1 => return try mod.getCoerced((if (enum_type.values.len == 0) + try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }) + else + enum_type.values[0]).toValue(), ty), + else => return null, + } }, }, @@ -34241,13 +34246,9 @@ fn intFitsInType( if (ty.toIntern() == .comptime_int_type) return true; const info = ty.intInfo(mod); switch (val.toIntern()) { - .undef, - .zero, - .zero_usize, - .zero_u8, - => return true, - + .zero_usize, .zero_u8 => return true, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => return true, .variable, .extern_func, .func, .ptr => { const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); @@ -34553,7 +34554,7 @@ fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { return sema.typeOf(ref).isNoReturn(sema.mod); } -/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain type. +/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type. 
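+/// Returns false for `inferred_alloc` and `inferred_alloc_comptime` instructions,
+/// whose type is not resolved yet.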
fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool { if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { .inferred_alloc, .inferred_alloc_comptime => return false, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index f252e8479131..a46e6ebe1f0f 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -302,10 +302,14 @@ fn printAggregate( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .struct_type, .anon_struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), - else => {}, - } + if (switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[i], + .anon_struct_type => |anon_struct_type| if (anon_struct_type.isTuple()) + null + else + mod.intern_pool.stringToSlice(anon_struct_type.names[i]), + else => unreachable, + }) |field_name| try writer.print(".{s} = ", .{field_name}); try print(.{ .ty = ty.structFieldType(i, mod), .val = try val.fieldValue(mod, i), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 3afb510d4320..5874440e504c 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4527,7 +4527,7 @@ fn cmp( } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => try lhs_ty.intTagType(mod), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5f476a2e8028..360f52cb30b7 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4475,7 +4475,7 @@ fn cmp( } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => try lhs_ty.intTagType(mod), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 354af50b61ab..3bcdd5ad25b3 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1435,7 +1435,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. 
- .Enum => try lhs_ty.intTagType(mod), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d2d54a69c5df..af2b37312d15 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -6883,7 +6883,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { return loc.index; } - const int_tag_ty = try enum_ty.intTagType(mod); + const int_tag_ty = enum_ty.intTagType(mod); if (int_tag_ty.bitSize(mod) > 64) { return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{}); diff --git a/src/codegen.zig b/src/codegen.zig index d6520ae7e533..0034e96e35d8 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -312,7 +312,7 @@ pub fn generateSymbol( } }, .enum_tag => |enum_tag| { - const int_tag_ty = try typed_value.ty.intTagType(mod); + const int_tag_ty = typed_value.ty.intTagType(mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = int_tag_ty, .val = try mod.getCoerced(enum_tag.int.toValue(), int_tag_ty), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 398a4124ccb5..956924eff8d6 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2773,7 +2773,7 @@ pub const DeclGen = struct { return dg.context.intType(info.bits); }, .Enum => { - const int_ty = try t.intTagType(mod); + const int_ty = t.intTagType(mod); const bit_count = int_ty.intInfo(mod).bits; assert(bit_count != 0); return dg.context.intType(bit_count); @@ -4148,9 +4148,7 @@ pub const DeclGen = struct { const mod = dg.module; const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, - .Enum => ty.intTagType(mod) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), - }, + .Enum => ty.intTagType(mod), .Float => { if (!is_rmw_xchg) return null; return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8)); @@ -5100,7 +5098,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { - .Enum => try scalar_ty.intTagType(mod), + .Enum => scalar_ty.intTagType(mod), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(mod); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 79c968232500..0fbcb47f7101 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -700,7 +700,7 @@ pub const DeclGen = struct { .enum_tag => { const int_val = try val.enumToInt(ty, mod); - const int_ty = try ty.intTagType(mod); + const int_ty = ty.intTagType(mod); try self.lower(int_ty, int_val); }, @@ -1156,7 +1156,7 @@ pub const DeclGen = struct { return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { - const tag_ty = try ty.intTagType(mod); + const tag_ty = ty.intTagType(mod); return self.resolveType(tag_ty, repr); }, .Float => { @@ -3053,7 +3053,7 @@ pub const DeclGen = struct { break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, .Enum => blk: { - const int_ty = try cond_ty.intTagType(mod); + const int_ty = cond_ty.intTagType(mod); const int_info = int_ty.intInfo(mod); const backing_bits = self.backingIntBits(int_info.bits) orelse { return self.todo("implement composite int switch", .{}); diff --git a/src/type.zig b/src/type.zig index bc2ce6fc7e74..27c7756a6891 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1842,17 +1842,15 @@ pub const Type = struct { /// See also `isPtrLikeOptional`. 
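+    /// Whether the optional is represented in memory exactly like its payload,
+    /// with null encoded as the payload's zero/invalid value (e.g. a null pointer).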
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { - .Pointer => { - const info = child.toType().ptrInfo(mod); - return switch (info.size) { - .C => false, - else => !info.@"allowzero", - }; + .opt_type => |child_type| switch (mod.intern_pool.indexToKey(child_type)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .C => false, + .Slice, .Many, .One => !ptr_type.is_allowzero, }, - .ErrorSet => true, + .error_set_type => true, else => false, }, + .ptr_type => |ptr_type| ptr_type.size == .C, else => false, }; } @@ -2570,23 +2568,27 @@ pub const Type = struct { return null; }, - .auto, .explicit => switch (enum_type.names.len) { - 0 => return Value.@"unreachable", - 1 => { - if (enum_type.values.len == 0) { - const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = try mod.intern(.{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = 0 }, - } }), - } }); - return only.toValue(); - } else { - return enum_type.values[0].toValue(); - } - }, - else => return null, + .auto, .explicit => { + if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; + + switch (enum_type.names.len) { + 0 => return Value.@"unreachable", + 1 => { + if (enum_type.values.len == 0) { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return only.toValue(); + } else { + return enum_type.values[0].toValue(); + } + }, + else => return null, + } }, }, @@ -2887,7 +2889,7 @@ pub const Type = struct { } /// Asserts the type is an enum or a union. - pub fn intTagType(ty: Type, mod: *Module) !Type { + pub fn intTagType(ty: Type, mod: *Module) Type { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod), .enum_type => |enum_type| enum_type.tag_ty.toType(), diff --git a/src/value.zig b/src/value.zig index a63b83daae5a..f02c31ca8448 100644 --- a/src/value.zig +++ b/src/value.zig @@ -673,7 +673,7 @@ pub const Value = struct { while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => return true, - .eu_payload, .opt_payload => |index| check = index.toValue(), + .eu_payload, .opt_payload => |base| check = base.toValue(), .elem, .field => |base_index| check = base_index.base.toValue(), else => return false, }, @@ -943,22 +943,27 @@ pub const Value = struct { return Value.true; } }, - .Int, .Enum => { - const int_info = ty.intInfo(mod); + .Int, .Enum => |ty_tag| { + const int_ty = switch (ty_tag) { + .Int => ty, + .Enum => ty.intTagType(mod), + else => unreachable, + }; + const int_info = int_ty.intInfo(mod); const bits = int_info.bits; const byte_count = (bits + 7) / 8; - if (bits == 0 or buffer.len == 0) return mod.intValue(ty, 0); + if (bits == 0 or buffer.len == 0) return mod.getCoerced(try mod.intValue(int_ty, 0), ty); if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 .signed => { const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian); const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits); - return mod.intValue(ty, result); + return mod.getCoerced(try mod.intValue(int_ty, result), ty); }, .unsigned => { const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian); const result = (val << 
@intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits); - return mod.intValue(ty, result); + return mod.getCoerced(try mod.intValue(int_ty, result), ty); }, } else { // Slow path, we have to construct a big-int const Limb = std.math.big.Limb; @@ -967,7 +972,7 @@ pub const Value = struct { var bigint = BigIntMutable.init(limbs_buffer, 0); bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness); - return mod.intValue_big(ty, bigint.toConst()); + return mod.getCoerced(try mod.intValue_big(int_ty, bigint.toConst()), ty); } }, .Float => return (try mod.intern(.{ .float = .{ @@ -1583,7 +1588,7 @@ pub const Value = struct { .Enum => { const a_val = try a.enumToInt(ty, mod); const b_val = try b.enumToInt(ty, mod); - const int_ty = try ty.intTagType(mod); + const int_ty = ty.intTagType(mod); return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, .Array, .Vector => { @@ -1835,7 +1840,8 @@ pub const Value = struct { })).toValue(), .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), - .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), + .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)) + .toValue().elemValue(mod, index), .int, .eu_payload, .opt_payload => unreachable, .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), @@ -1946,9 +1952,12 @@ pub const Value = struct { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), - .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), - .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), - .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), + .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)).toValue() + .sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| comptime_field.toValue() + .sliceArray(mod, arena, start, end), + .elem => |elem| elem.base.toValue() + .sliceArray(mod, arena, start + elem.index, end + elem.index), else => unreachable, }, .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ From 32692569656d9a178abb24f8fb7893395700cb62 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 00:10:36 -0400 Subject: [PATCH 121/205] behavior: fix more compiler crashes --- src/InternPool.zig | 11 +- src/Module.zig | 29 +---- src/Sema.zig | 208 ++++++++++++++++++++++--------- src/TypedValue.zig | 4 +- src/codegen/c.zig | 4 +- src/codegen/llvm.zig | 2 +- src/type.zig | 4 +- src/value.zig | 226 +++++++++++++++++++--------------- tools/lldb_pretty_printers.py | 26 ++-- 9 files changed, 309 insertions(+), 205 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 55ab58c391fa..90e0e2bd35b4 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1416,7 +1416,12 @@ pub const Index = enum(u32) { only_possible_value: DataIsIndex, union_value: struct { data: *Key.Union }, bytes: struct { data: *Bytes }, - aggregate: struct { data: *Aggregate }, + aggregate: struct { + const @"data.ty.data.len orelse data.ty.data.fields_len" = opaque {}; + data: *Aggregate, + @"trailing.element_values.len": *@"data.ty.data.len orelse data.ty.data.fields_len", + trailing: struct { element_values: []Index }, + }, repeated: struct 
{ data: *Repeated }, memoized_decl: struct { data: *Key.MemoizedDecl }, @@ -4437,7 +4442,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .Slice => try ip.get(gpa, .{ .undef = .usize_type }), }, } }), - else => try ip.getCoerced(gpa, opt.val, new_ty), + else => |payload| try ip.getCoerced(gpa, payload, new_ty), }, .err => |err| if (ip.isErrorSetType(new_ty)) return ip.get(gpa, .{ .err = .{ @@ -4622,7 +4627,7 @@ pub fn isErrorUnionType(ip: InternPool, ty: Index) bool { pub fn isAggregateType(ip: InternPool, ty: Index) bool { return switch (ip.indexToKey(ty)) { - .array_wype, .vector_type, .anon_struct_type, .struct_type => true, + .array_type, .vector_type, .anon_struct_type, .struct_type => true, else => false, }; } diff --git a/src/Module.zig b/src/Module.zig index d11a11cf0835..36037bb49c11 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6678,14 +6678,14 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error! pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { var canon_info = info; + const have_elem_layout = info.elem_type.toType().layoutIsResolved(mod); // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee // type, we change it to 0 here. If this causes an assertion trip because the // pointee type needs to be resolved more, that needs to be done before calling // this ptr() function. if (info.alignment.toByteUnitsOptional()) |info_align| { - const elem_align = info.elem_type.toType().abiAlignment(mod); - if (info.elem_type.toType().layoutIsResolved(mod) and info_align == elem_align) { + if (have_elem_layout and info_align == info.elem_type.toType().abiAlignment(mod)) { canon_info.alignment = .none; } } @@ -6694,7 +6694,7 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type // Canonicalize host_size. If it matches the bit size of the pointee type, // we change it to 0 here. If this causes an assertion trip, the pointee type // needs to be resolved before calling this ptr() function. - .none => if (info.host_size != 0) { + .none => if (have_elem_layout and info.host_size != 0) { const elem_bit_size = info.elem_type.toType().bitSize(mod); assert(info.bit_offset + elem_bit_size <= info.host_size * 8); if (info.host_size * 8 == elem_bit_size) { @@ -6782,21 +6782,7 @@ pub fn errorSetFromUnsortedNames( /// Supports optionals in addition to pointers. pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { - if (ty.isPtrLikeOptional(mod)) { - const i = try intern(mod, .{ .opt = .{ - .ty = ty.toIntern(), - .val = try intern(mod, .{ .ptr = .{ - .ty = ty.childType(mod).toIntern(), - .addr = .{ .int = try intern(mod, .{ .int = .{ - .ty = .usize_type, - .storage = .{ .u64 = x }, - } }) }, - } }), - } }); - return i.toValue(); - } else { - return ptrIntValue_ptronly(mod, ty, x); - } + return mod.getCoerced(try mod.intValue_u64(Type.usize, x), ty); } /// Supports only pointers. See `ptrIntValue` for pointer-like optional support. 
@@ -6804,10 +6790,7 @@ pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value assert(ty.zigTypeTag(mod) == .Pointer); const i = try intern(mod, .{ .ptr = .{ .ty = ty.toIntern(), - .addr = .{ .int = try intern(mod, .{ .int = .{ - .ty = .usize_type, - .storage = .{ .u64 = x }, - } }) }, + .addr = .{ .int = try mod.intValue_u64(Type.usize, x) }, } }); return i.toValue(); } @@ -6954,7 +6937,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { const key = mod.intern_pool.indexToKey(val.toIntern()); switch (key.int.storage) { .i64 => |x| { - if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted); + if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @boolToInt(sign); assert(sign); // Protect against overflow in the following negation. if (x == std.math.minInt(i64)) return 64; diff --git a/src/Sema.zig b/src/Sema.zig index 7ad7b1a8a305..87d66aad1f70 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -9510,7 +9510,10 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { - return sema.addConstant(Type.usize, try mod.getCoerced(ptr_val, Type.usize)); + return sema.addConstant( + Type.usize, + try mod.intValue(Type.usize, (try ptr_val.getUnsignedIntAdvanced(mod, sema)).?), + ); } try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); return block.addUnOp(.ptrtoint, ptr); @@ -27879,7 +27882,7 @@ fn beginComptimePtrMutation( .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }), .opt => |opt| switch (opt.val) { .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), - else => opt.val, + else => |payload| payload, }, else => unreachable, }; @@ -28438,7 +28441,7 @@ fn beginComptimePtrLoad( }, .opt => |opt| switch (opt.val) { .none => return sema.fail(block, src, "attempt to use null value", .{}), - else => opt.val, + else => |payload| payload, }, else => unreachable, }.toValue(), @@ -28591,7 +28594,7 @@ fn beginComptimePtrLoad( }, .opt => |opt| switch (opt.val) { .none => return sema.fail(block, src, "attempt to use null value", .{}), - else => try sema.beginComptimePtrLoad(block, src, opt.val.toValue(), null), + else => |payload| try sema.beginComptimePtrLoad(block, src, payload.toValue(), null), }, else => unreachable, }; @@ -28931,35 +28934,53 @@ fn coerceAnonStructToUnion( ) !Air.Inst.Ref { const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const field_count = inst_ty.structFieldCount(mod); - if (field_count != 1) { - const msg = msg: { - const msg = if (field_count > 1) try sema.errMsg( - block, - inst_src, - "cannot initialize multiple union fields at once; unions can only have one active field", - .{}, - ) else try sema.errMsg( - block, - inst_src, - "union initializer must initialize one field", - .{}, - ); - errdefer msg.destroy(sema.gpa); + const field_info: union(enum) { + name: []const u8, + count: usize, + } = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1) + .{ .name = mod.intern_pool.stringToSlice(anon_struct_type.names[0]) } + else + .{ .count = anon_struct_type.names.len }, + .struct_type => |struct_type| name: { + const field_names = mod.structPtrUnwrap(struct_type.index).?.fields.keys(); + break :name if (field_names.len == 1) + .{ .name = field_names[0] } + else + .{ .count = field_names.len }; + }, 
+ else => unreachable, + }; + switch (field_info) { + .name => |field_name| { + const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); + return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); + }, + .count => |field_count| { + assert(field_count != 1); + const msg = msg: { + const msg = if (field_count > 1) try sema.errMsg( + block, + inst_src, + "cannot initialize multiple union fields at once; unions can only have one active field", + .{}, + ) else try sema.errMsg( + block, + inst_src, + "union initializer must initialize one field", + .{}, + ); + errdefer msg.destroy(sema.gpa); - // TODO add notes for where the anon struct was created to point out - // the extra fields. + // TODO add notes for where the anon struct was created to point out + // the extra fields. - try sema.addDeclaredHereNote(msg, union_ty); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); + try sema.addDeclaredHereNote(msg, union_ty); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, } - - const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; - const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]); - const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); - return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); } fn coerceAnonStructToUnionPtrs( @@ -29193,16 +29214,27 @@ fn coerceTupleToStruct( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; var runtime_src: ?LazySrcLoc = null; - for (0..anon_struct.types.len) |field_index_usize| { + const field_count = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + for (0..field_count) |field_index_usize| { const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (anon_struct.names.len != 0) - // https://github.com/ziglang/zig/issues/15709 - @as([]const u8, mod.intern_pool.stringToSlice(anon_struct.names[field_i])) - else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); + // https://github.com/ziglang/zig/issues/15709 + const field_name: []const u8 = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) + mod.intern_pool.stringToSlice(anon_struct_type.names[field_i]) + else + try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + else => unreachable, + }; const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src); const field = fields.values()[field_index]; const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); @@ -29281,40 +29313,72 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type; - const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len); + const dest_field_count = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => 
|anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const src_tuple = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; - if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible; + const src_field_count = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + if (src_field_count > dest_field_count) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; - for (dest_tuple.types, dest_tuple.values, 0..) |field_ty, default_val, field_index_usize| { + for (0..dest_field_count) |field_index_usize| { const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (src_tuple.names.len != 0) - // https://github.com/ziglang/zig/issues/15709 - @as([]const u8, mod.intern_pool.stringToSlice(src_tuple.names[field_i])) - else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); + // https://github.com/ziglang/zig/issues/15709 + const field_name: []const u8 = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) + mod.intern_pool.stringToSlice(anon_struct_type.names[field_i]) + else + try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + else => unreachable, + }; if (mem.eql(u8, field_name, "len")) { return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); } + const field_ty = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types[field_index_usize].toType(), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty, + else => unreachable, + }; + const default_val = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.values[field_index_usize], + .struct_type => |struct_type| switch (mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val.toIntern()) { + .unreachable_value => .none, + else => |default_val| default_val, + }, + else => unreachable, + }; + const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); - const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src); + const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); field_refs[field_index] = coerced; if (default_val != .none) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) { + if (!init_val.eql(default_val.toValue(), field_ty, sema.mod)) { return 
sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } @@ -29331,14 +29395,18 @@ fn coerceTupleToTuple( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - for ( - dest_tuple.types, - dest_tuple.values, - field_refs, - 0.., - ) |field_ty, default_val, *field_ref, i| { + for (field_refs, 0..) |*field_ref, i| { if (field_ref.* != .none) continue; + const default_val = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.values[i], + .struct_type => |struct_type| switch (mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val.toIntern()) { + .unreachable_value => .none, + else => |default_val| default_val, + }, + else => unreachable, + }; + const field_src = inst_src; // TODO better source location if (default_val == .none) { if (tuple_ty.isTuple(mod)) { @@ -29362,7 +29430,12 @@ fn coerceTupleToTuple( if (runtime_src == null) { field_vals[i] = default_val; } else { - field_ref.* = try sema.addConstant(field_ty.toType(), default_val.toValue()); + const field_ty = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types[i].toType(), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].ty, + else => unreachable, + }; + field_ref.* = try sema.addConstant(field_ty, default_val.toValue()); } } @@ -33959,11 +34032,20 @@ fn anonStructFieldIndex( field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; - const anon_struct = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type; - for (anon_struct.names, 0..) |name, i| { - if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { - return @intCast(u32, i); - } + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| { + if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { + return @intCast(u32, i); + } + }, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { + for (struct_obj.fields.keys(), 0..) 
|name, i| { + if (mem.eql(u8, name, field_name)) { + return @intCast(u32, i); + } + } + }, + else => unreachable, } return sema.fail(block, field_src, "no field named '{s}' in anonymous struct '{}'", .{ field_name, struct_ty.fmt(sema.mod), @@ -34006,6 +34088,10 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); + if (scalar_ty.toIntern() != .comptime_int_type) { + const int_info = scalar_ty.intInfo(mod); + result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); + } return mod.intValue_big(scalar_ty, result_bigint.toConst()); } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index a46e6ebe1f0f..0128a3cbfb4c 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -254,8 +254,8 @@ pub fn print( .ptr => return writer.writeAll("(ptr)"), .opt => |opt| switch (opt.val) { .none => return writer.writeAll("null"), - else => { - val = opt.val.toValue(); + else => |payload| { + val = payload.toValue(); ty = ty.optionalChild(mod); }, }, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e6ce72f48e5b..2f65513dcd81 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1313,7 +1313,7 @@ pub const DeclGen = struct { if (ty.optionalReprIsPayload(mod)) switch (opt.val) { .none => return writer.writeByte('0'), - else => return dg.renderValue(writer, payload_ty, opt.val.toValue(), location), + else => |payload| return dg.renderValue(writer, payload_ty, payload.toValue(), location), }; if (!location.isInitializer()) { @@ -1325,7 +1325,7 @@ pub const DeclGen = struct { try writer.writeAll("{ .payload = "); try dg.renderValue(writer, payload_ty, switch (opt.val) { .none => try mod.intern(.{ .undef = payload_ty.ip_index }), - else => opt.val, + else => |payload| payload, }.toValue(), initializer_type); try writer.writeAll(", .is_null = "); try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 956924eff8d6..606c57b187be 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3430,7 +3430,7 @@ pub const DeclGen = struct { const llvm_ty = try dg.lowerType(tv.ty); if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { .none => llvm_ty.constNull(), - else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), + else => |payload| dg.lowerValue(.{ .ty = payload_ty, .val = payload.toValue() }), }; assert(payload_ty.zigTypeTag(mod) != .Fn); diff --git a/src/type.zig b/src/type.zig index 27c7756a6891..0ce242b6163e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -630,7 +630,6 @@ pub const Type = struct { pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type, - .ptr_type, .vector_type, => true, @@ -646,6 +645,7 @@ pub const Type = struct { .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), .opt_type => ty.isPtrLikeOptional(mod), + .ptr_type => |ptr_type| ptr_type.size != .Slice, .simple_type => |t| switch (t) { .f16, @@ -1578,7 +1578,7 @@ pub const Type = struct { .int_type => |int_type| return int_type.bits, .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), }, .anyframe_type => return target.ptrBitWidth(), diff --git a/src/value.zig b/src/value.zig index 
f02c31ca8448..0da562693756 100644 --- a/src/value.zig +++ b/src/value.zig @@ -363,7 +363,7 @@ pub const Value = struct { }, .slice => { const pl = val.castTag(.slice).?.data; - const ptr = try pl.ptr.intern(ty.optionalChild(mod), mod); + const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod); var ptr_key = mod.intern_pool.indexToKey(ptr).ptr; assert(ptr_key.len == .none); ptr_key.ty = ty.toIntern(); @@ -547,7 +547,6 @@ pub const Value = struct { return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), - .undef => unreachable, .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), @@ -564,19 +563,10 @@ pub const Value = struct { }, }, .enum_tag => |enum_tag| enum_tag.int.toValue().toBigIntAdvanced(space, mod, opt_sema), - .ptr => |ptr| switch (ptr.len) { - .none => switch (ptr.addr) { - .int => |int| int.toValue().toBigIntAdvanced(space, mod, opt_sema), - .elem => |elem| { - const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)).?; - const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod); - const new_addr = base_addr + elem.index * elem_size; - return BigIntMutable.init(&space.limbs, new_addr).toConst(); - }, - else => unreachable, - }, - else => unreachable, - }, + .opt, .ptr => BigIntMutable.init( + &space.limbs, + (try val.getUnsignedIntAdvanced(mod, opt_sema)).?, + ).toConst(), else => unreachable, }, }; @@ -614,10 +604,11 @@ pub const Value = struct { /// Asserts not undefined. pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { return switch (val.toIntern()) { + .undef => unreachable, .bool_false => 0, .bool_true => 1, - .undef => unreachable, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => unreachable, .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, @@ -631,6 +622,26 @@ pub const Value = struct { else ty.toType().abiSize(mod), }, + .ptr => |ptr| switch (ptr.addr) { + .int => |int| int.toValue().getUnsignedIntAdvanced(mod, opt_sema), + .elem => |elem| { + const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod); + return base_addr + elem.index * elem_size; + }, + .field => |field| { + const struct_ty = ptr.ty.toType().childType(mod); + if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); + const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const field_offset = ptr.ty.toType().childType(mod).structFieldOffset(field.index, mod); + return base_addr + field_offset; + }, + else => null, + }, + .opt => |opt| switch (opt.val) { + .none => 0, + else => |payload| payload.toValue().getUnsignedIntAdvanced(mod, opt_sema), + }, else => null, }, }; @@ -646,7 +657,6 @@ pub const Value = struct { return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, - .undef => unreachable, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, @@ -830,24 +840,14 @@ pub const Value = struct { } }, .Int, .Enum => { + if (buffer.len == 0) return; const bits = ty.intInfo(mod).bits; - const abi_size = @intCast(usize, 
ty.abiSize(mod)); - - const int_val = try val.enumToInt(ty, mod); + if (bits == 0) return; - if (abi_size == 0) return; - if (abi_size <= @sizeOf(u64)) { - const ip_key = mod.intern_pool.indexToKey(int_val.toIntern()); - const int: u64 = switch (ip_key.int.storage) { - .u64 => |x| x, - .i64 => |x| @bitCast(u64, x), - else => unreachable, - }; - std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian); - } else { - var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, mod); - bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian); + switch (mod.intern_pool.indexToKey((try val.enumToInt(ty, mod)).toIntern()).int.storage) { + inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian), + .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian), + else => unreachable, } }, .Float => switch (ty.floatBits(target)) { @@ -1075,25 +1075,40 @@ pub const Value = struct { return Value.true; } }, - .Int, .Enum => { + .Int, .Enum => |ty_tag| { if (buffer.len == 0) return mod.intValue(ty, 0); const int_info = ty.intInfo(mod); - const abi_size = @intCast(usize, ty.abiSize(mod)); - const bits = int_info.bits; if (bits == 0) return mod.intValue(ty, 0); - if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 - .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)), - .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)), - } else { // Slow path, we have to construct a big-int - const Limb = std.math.big.Limb; - const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); - const limbs_buffer = try arena.alloc(Limb, limb_count); - var bigint = BigIntMutable.init(limbs_buffer, 0); - bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness); - return mod.intValue_big(ty, bigint.toConst()); + // Fast path for integers <= u64 + if (bits <= 64) { + const int_ty = switch (ty_tag) { + .Int => ty, + .Enum => ty.intTagType(mod), + else => unreachable, + }; + return mod.getCoerced(switch (int_info.signedness) { + .signed => return mod.intValue( + int_ty, + std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed), + ), + .unsigned => return mod.intValue( + int_ty, + std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned), + ), + }, ty); } + + // Slow path, we have to construct a big-int + const abi_size = @intCast(usize, ty.abiSize(mod)); + const Limb = std.math.big.Limb; + const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); + const limbs_buffer = try arena.alloc(Limb, limb_count); + + var bigint = BigIntMutable.init(limbs_buffer, 0); + bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness); + return mod.intValue_big(ty, bigint.toConst()); }, .Float => return (try mod.intern(.{ .float = .{ .ty = ty.toIntern(), @@ -1764,7 +1779,7 @@ pub const Value = struct { }, .opt => |opt| switch (opt.val) { .none => false, - else => opt.val.toValue().canMutateComptimeVarState(mod), + else => |payload| payload.toValue().canMutateComptimeVarState(mod), }, .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { if (elem.toValue().canMutateComptimeVarState(mod)) break true; @@ -1949,43 +1964,51 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .ptr => 
|ptr| switch (ptr.addr) { - .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), - .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)).toValue() - .sliceArray(mod, arena, start, end), - .comptime_field => |comptime_field| comptime_field.toValue() - .sliceArray(mod, arena, start, end), - .elem => |elem| elem.base.toValue() - .sliceArray(mod, arena, start + elem.index, end + elem.index), + return switch (val.ip_index) { + .none => switch (val.tag()) { + .slice => val.castTag(.slice).?.data.ptr.sliceArray(mod, arena, start, end), + .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), + .repeated => val, + .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]), else => unreachable, }, - .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ - .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { - .array_type => |array_type| try mod.arrayType(.{ - .len = @intCast(u32, end - start), - .child = array_type.child, - .sentinel = if (end == array_type.len) array_type.sentinel else .none, - }), - .vector_type => |vector_type| try mod.vectorType(.{ - .len = @intCast(u32, end - start), - .child = vector_type.child, - }), + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)).toValue() + .sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| comptime_field.toValue() + .sliceArray(mod, arena, start, end), + .elem => |elem| elem.base.toValue() + .sliceArray(mod, arena, start + elem.index, end + elem.index), else => unreachable, - }.toIntern(), - .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[start..end] }, - .elems => |elems| .{ .elems = elems[start..end] }, - .repeated_elem => |elem| .{ .repeated_elem = elem }, }, - } })).toValue(), - else => unreachable, + .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ + .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { + .array_type => |array_type| try mod.arrayType(.{ + .len = @intCast(u32, end - start), + .child = array_type.child, + .sentinel = if (end == array_type.len) array_type.sentinel else .none, + }), + .vector_type => |vector_type| try mod.vectorType(.{ + .len = @intCast(u32, end - start), + .child = vector_type.child, + }), + else => unreachable, + }.toIntern(), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[start..end] }, + .elems => |elems| .{ .elems = elems[start..end] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, + } })).toValue(), + else => unreachable, + }, }; } pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { return switch (val.ip_index) { - .undef => Value.undef, .none => switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -1999,6 +2022,9 @@ pub const Value = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => |ty| (try mod.intern(.{ + .undef = ty.toType().structFieldType(index, mod).toIntern(), + })).toValue(), .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try mod.intern(.{ .int = .{ .ty = .u8_type, @@ -2108,6 +2134,7 @@ pub const Value = struct { .null_value => true, else => return switch (mod.intern_pool.indexToKey(val.toIntern())) 
{ + .undef => unreachable, .int => { var buf: BigIntSpace = undefined; return val.toBigInt(&buf, mod).eqZero(); @@ -2141,9 +2168,13 @@ pub const Value = struct { /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(val.toIntern()).opt.val) { - .none => null, - else => |index| index.toValue(), + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .opt => |opt| switch (opt.val) { + .none => null, + else => |payload| payload.toValue(), + }, + .ptr => val, + else => unreachable, }; } @@ -2152,6 +2183,7 @@ pub const Value = struct { return switch (self.toIntern()) { .undef => unreachable, else => switch (mod.intern_pool.indexToKey(self.toIntern())) { + .undef => unreachable, .float => true, else => false, }, @@ -2182,28 +2214,26 @@ pub const Value = struct { } pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - return switch (val.toIntern()) { - .undef => val, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .big_int => |big_int| { - const float = bigIntToFloat(big_int.limbs, big_int.positive); - return mod.floatValue(float_ty, float); - }, - inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod), - .lazy_align => |ty| if (opt_sema) |sema| { - return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod); - }, - .lazy_size => |ty| if (opt_sema) |sema| { - return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod); - }, + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => (try mod.intern(.{ .undef = float_ty.toIntern() })).toValue(), + .int => |int| switch (int.storage) { + .big_int => |big_int| { + const float = bigIntToFloat(big_int.limbs, big_int.positive); + return mod.floatValue(float_ty, float); + }, + inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod), + .lazy_align => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } else { + return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod); + }, + .lazy_size => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } else { + return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod); }, - else => unreachable, }, + else => unreachable, }; } diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 0bba97dcaa1a..3d57adee707e 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -455,21 +455,21 @@ def update(self): elif encoding_field.name == 'trailing': trailing_data = lldb.SBData() for trailing_field in encoding_field.type.fields: - if trailing_field.type.IsAggregateType(): - trailing_data.Append(extra.GetChildAtIndex(extra_index).address_of.data) - len = dynamic_values['trailing.%s.len' % trailing_field.name].unsigned - trailing_data.Append(lldb.SBData.CreateDataFromInt(len, trailing_data.GetAddressByteSize())) - extra_index += len - else: - pass + trailing_data.Append(extra.GetChildAtIndex(extra_index).address_of.data) + trailing_len = dynamic_values['trailing.%s.len' % 
trailing_field.name].unsigned + trailing_data.Append(lldb.SBData.CreateDataFromInt(trailing_len, trailing_data.GetAddressByteSize())) + extra_index += trailing_len self.trailing = self.data.CreateValueFromData('trailing', trailing_data, encoding_field.type) else: - path = encoding_field.type.GetPointeeType().name.removeprefix('%s::' % encoding_type.name).removeprefix('%s.' % encoding_type.name).partition('__')[0].split('.') - if path[0] == 'data': - dynamic_value = self.data - for name in path[1:]: - dynamic_value = dynamic_value.GetChildMemberWithName(name) - dynamic_values[encoding_field.name] = dynamic_value + for path in encoding_field.type.GetPointeeType().name.removeprefix('%s::' % encoding_type.name).removeprefix('%s.' % encoding_type.name).partition('__')[0].split(' orelse '): + if path.startswith('data.'): + root = self.data + path = path[len('data'):] + else: return + dynamic_value = root.GetValueForExpressionPath(path) + if dynamic_value: + dynamic_values[encoding_field.name] = dynamic_value + break except: pass def has_children(self): return True def num_children(self): return 2 + (self.trailing is not None) From 4f70863a55e699c13731325f8c52870119479c02 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 07:30:30 -0400 Subject: [PATCH 122/205] InternPool: fix various pointer issues --- src/InternPool.zig | 145 ++++++++++++++++++++++++--------------------- src/Module.zig | 4 ++ src/Sema.zig | 29 +++++---- src/value.zig | 4 +- 4 files changed, 102 insertions(+), 80 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 90e0e2bd35b4..7bfecf46b6c7 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1374,12 +1374,12 @@ pub const Index = enum(u32) { undef: DataIsIndex, runtime_value: DataIsIndex, simple_value: struct { data: SimpleValue }, - ptr_mut_decl: struct { data: *PtrMutDecl }, ptr_decl: struct { data: *PtrDecl }, - ptr_int: struct { data: *PtrAddr }, - ptr_eu_payload: DataIsIndex, - ptr_opt_payload: DataIsIndex, + ptr_mut_decl: struct { data: *PtrMutDecl }, ptr_comptime_field: struct { data: *PtrComptimeField }, + ptr_int: struct { data: *PtrBase }, + ptr_eu_payload: struct { data: *PtrBase }, + ptr_opt_payload: struct { data: *PtrBase }, ptr_elem: struct { data: *PtrBaseIndex }, ptr_field: struct { data: *PtrBaseIndex }, ptr_slice: struct { data: *PtrSlice }, @@ -1774,29 +1774,25 @@ pub const Tag = enum(u8) { /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// A pointer to a decl that can be mutated at comptime. - /// data is extra index of PtrMutDecl, which contains the type and address. - ptr_mut_decl, /// A pointer to a decl. - /// data is extra index of PtrDecl, which contains the type and address. + /// data is extra index of `PtrDecl`, which contains the type and address. ptr_decl, + /// A pointer to a decl that can be mutated at comptime. + /// data is extra index of `PtrMutDecl`, which contains the type and address. + ptr_mut_decl, + /// data is extra index of `PtrComptimeField`, which contains the pointer type and field value. + ptr_comptime_field, /// A pointer with an integer value. - /// data is extra index of PtrAddr, which contains the type and address. + /// data is extra index of `PtrBase`, which contains the type and address. /// Only pointer types are allowed to have this encoding. Optional types must use /// `opt_payload` or `opt_null`. ptr_int, /// A pointer to the payload of an error union. - /// data is Index of a pointer value to the error union. 
- /// In order to use this encoding, one must ensure that the `InternPool` - /// already contains the payload pointer type corresponding to this payload. + /// data is extra index of `PtrBase`, which contains the type and base pointer. ptr_eu_payload, /// A pointer to the payload of an optional. - /// data is Index of a pointer value to the optional. - /// In order to use this encoding, one must ensure that the `InternPool` - /// already contains the payload pointer type corresponding to this payload. + /// data is extra index of `PtrBase`, which contains the type and base pointer. ptr_opt_payload, - /// data is extra index of PtrComptimeField, which contains the pointer type and field value. - ptr_comptime_field, /// A pointer to an array element. /// data is extra index of PtrBaseIndex, which contains the base array and element index. /// In order to use this encoding, one must ensure that the `InternPool` @@ -2224,14 +2220,14 @@ pub const PtrMutDecl = struct { runtime_index: RuntimeIndex, }; -pub const PtrAddr = struct { +pub const PtrComptimeField = struct { ty: Index, - addr: Index, + field_val: Index, }; -pub const PtrComptimeField = struct { +pub const PtrBase = struct { ty: Index, - field_val: Index, + base: Index, }; pub const PtrBaseIndex = struct { @@ -2598,36 +2594,23 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, } }; }, - .ptr_int => { - const info = ip.extraData(PtrAddr, data); + .ptr_comptime_field => { + const info = ip.extraData(PtrComptimeField, data); return .{ .ptr = .{ .ty = info.ty, - .addr = .{ .int = info.addr }, - } }; - }, - .ptr_eu_payload => { - const ptr_eu_index = @intToEnum(Index, data); - var ptr_type = ip.indexToKey(ip.typeOf(ptr_eu_index)).ptr_type; - ptr_type.elem_type = ip.indexToKey(ptr_type.elem_type).error_union_type.payload_type; - return .{ .ptr = .{ - .ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }), - .addr = .{ .eu_payload = ptr_eu_index }, + .addr = .{ .comptime_field = info.field_val }, } }; }, - .ptr_opt_payload => { - const ptr_opt_index = @intToEnum(Index, data); - var ptr_type = ip.indexToKey(ip.typeOf(ptr_opt_index)).ptr_type; - ptr_type.elem_type = ip.indexToKey(ptr_type.elem_type).opt_type; - return .{ .ptr = .{ - .ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }), - .addr = .{ .opt_payload = ptr_opt_index }, - } }; - }, - .ptr_comptime_field => { - const info = ip.extraData(PtrComptimeField, data); + .ptr_int, .ptr_eu_payload, .ptr_opt_payload => { + const info = ip.extraData(PtrBase, data); return .{ .ptr = .{ .ty = info.ty, - .addr = .{ .comptime_field = info.field_val }, + .addr = switch (item.tag) { + .ptr_int => .{ .int = info.base }, + .ptr_eu_payload => .{ .eu_payload = info.base }, + .ptr_opt_payload => .{ .opt_payload = info.base }, + else => unreachable, + }, } }; }, .ptr_elem => { @@ -3248,39 +3231,67 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .runtime_index = mut_decl.runtime_index, }), }), - .int => |int| { - assert(ip.typeOf(int) == .usize_type); + .comptime_field => |field_val| { + assert(field_val != .none); ip.items.appendAssumeCapacity(.{ - .tag = .ptr_int, - .data = try ip.addExtra(gpa, PtrAddr{ + .tag = .ptr_comptime_field, + .data = try ip.addExtra(gpa, PtrComptimeField{ .ty = ptr.ty, - .addr = int, + .field_val = field_val, }), }); }, - .eu_payload, .opt_payload => |data| { - assert(data != .none); + .int, .eu_payload, .opt_payload => |base| { + switch (ptr.addr) { + .int => assert(ip.typeOf(base) == .usize_type), + .eu_payload => 
assert(ip.indexToKey( + ip.indexToKey(ip.typeOf(base)).ptr_type.elem_type, + ) == .error_union_type), + .opt_payload => assert(ip.indexToKey( + ip.indexToKey(ip.typeOf(base)).ptr_type.elem_type, + ) == .opt_type), + else => unreachable, + } ip.items.appendAssumeCapacity(.{ .tag = switch (ptr.addr) { + .int => .ptr_int, .eu_payload => .ptr_eu_payload, .opt_payload => .ptr_opt_payload, else => unreachable, }, - .data = @enumToInt(data), - }); - }, - .comptime_field => |field_val| { - assert(field_val != .none); - ip.items.appendAssumeCapacity(.{ - .tag = .ptr_comptime_field, - .data = try ip.addExtra(gpa, PtrComptimeField{ + .data = try ip.addExtra(gpa, PtrBase{ .ty = ptr.ty, - .field_val = field_val, + .base = base, }), }); }, .elem, .field => |base_index| { - assert(base_index.base != .none); + const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; + switch (base_ptr_type.size) { + .One => switch (ip.indexToKey(base_ptr_type.elem_type)) { + .array_type, .vector_type => assert(ptr.addr == .elem), + .anon_struct_type => |anon_struct_type| { + assert(ptr.addr == .field); + assert(base_index.index < anon_struct_type.types.len); + }, + .struct_type => |struct_type| { + assert(ptr.addr == .field); + assert(base_index.index < ip.structPtrUnwrapConst(struct_type.index).?.fields.count()); + }, + .union_type => |union_type| { + assert(ptr.addr == .field); + assert(base_index.index < ip.unionPtrConst(union_type.index).fields.count()); + }, + .ptr_type => |slice_type| { + assert(ptr.addr == .field); + assert(slice_type.size == .Slice); + assert(base_index.index < 2); + }, + else => unreachable, + }, + .Many => assert(ptr.addr == .elem), + .Slice, .C => unreachable, + } _ = ip.map.pop(); const index_index = try ip.get(gpa, .{ .int = .{ .ty = .usize_type, @@ -4750,10 +4761,10 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .simple_value => 0, .ptr_decl => @sizeOf(PtrDecl), .ptr_mut_decl => @sizeOf(PtrMutDecl), - .ptr_int => @sizeOf(PtrAddr), - .ptr_eu_payload => 0, - .ptr_opt_payload => 0, .ptr_comptime_field => @sizeOf(PtrComptimeField), + .ptr_int => @sizeOf(PtrBase), + .ptr_eu_payload => @sizeOf(PtrBase), + .ptr_opt_payload => @sizeOf(PtrBase), .ptr_elem => @sizeOf(PtrBaseIndex), .ptr_field => @sizeOf(PtrBaseIndex), .ptr_slice => @sizeOf(PtrSlice), @@ -5281,12 +5292,12 @@ pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std .undef, .runtime_value, .simple_value, - .ptr_mut_decl, .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, .ptr_int, .ptr_eu_payload, .ptr_opt_payload, - .ptr_comptime_field, .ptr_elem, .ptr_field, .ptr_slice, diff --git a/src/Module.zig b/src/Module.zig index 36037bb49c11..49cf9387a944 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6716,6 +6716,10 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .elem_type = child_type.toIntern(), .is_const = true }); } +pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + return ptrType(mod, .{ .elem_type = child_type.toIntern(), .size = .Many, .is_const = true }); +} + pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { const info = Type.ptrInfoIp(mod.intern_pool, ptr_ty.toIntern()); return mod.ptrType(.{ diff --git a/src/Sema.zig b/src/Sema.zig index 87d66aad1f70..bd3cfad50d29 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -25412,11 +25412,13 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const 
index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_ty = try sema.elemPtrType(indexable_ty, index); - const elem_ptr_val = try indexable_val.elemPtr(elem_ptr_ty, index, mod); + const elem_ty = indexable_ty.elemType2(mod); + const many_ptr_ty = try mod.manyConstPtrType(elem_ty); + const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); + const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); + const elem_ptr_val = try many_ptr_val.elemPtr(elem_ptr_ty, index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { - const result_ty = indexable_ty.elemType2(mod); - return sema.addConstant(result_ty, try mod.getCoerced(elem_val, result_ty)); + return sema.addConstant(elem_ty, try mod.getCoerced(elem_val, elem_ty)); } break :rs indexable_src; }; @@ -29906,7 +29908,7 @@ fn analyzeSlice( const ptr_ptr_ty = sema.typeOf(ptr_ptr); const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ptr_ty.childType(mod), - else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}), + else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(mod)}), }; var array_ty = ptr_ptr_child_ty; @@ -30111,7 +30113,10 @@ fn analyzeSlice( const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(try sema.elemPtrType(new_ptr_ty, sentinel_index), sentinel_index, sema.mod); + const many_ptr_ty = try mod.manyConstPtrType(elem_ty); + const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty); + const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); + const elem_ptr = try many_ptr_val.elemPtr(elem_ptr_ty, sentinel_index, mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -30120,23 +30125,23 @@ fn analyzeSlice( block, src, "comptime dereference requires '{}' to have a well-defined layout, but it does not.", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ), .out_of_bounds => |ty| return sema.fail( block, end_src, "slice end index {d} exceeds bounds of containing decl of type '{}'", - .{ end_int, ty.fmt(sema.mod) }, + .{ end_int, ty.fmt(mod) }, ), }; - if (!actual_sentinel.eql(expected_sentinel, elem_ty, sema.mod)) { + if (!actual_sentinel.eql(expected_sentinel, elem_ty, mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "value in memory does not match slice sentinel", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, src, msg, "expected '{}', found '{}'", .{ - expected_sentinel.fmtValue(elem_ty, sema.mod), - actual_sentinel.fmtValue(elem_ty, sema.mod), + expected_sentinel.fmtValue(elem_ty, mod), + actual_sentinel.fmtValue(elem_ty, mod), }); break :msg msg; @@ -30310,7 +30315,7 @@ fn cmpNumeric( const lhs_ty_tag = lhs_ty.zigTypeTag(mod); const rhs_ty_tag = rhs_ty.zigTypeTag(mod); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); // One exception to heterogeneous comparison: comptime_float needs to // coerce to fixed-width float. 
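The pointer fixes in this patch all lean on the same storage discipline: an InternPool item is a (tag, u32 data) pair, and any payload wider than 32 bits, such as `PtrBase`, lives in a flat `extra` array that the data field indexes. As a rough sketch of that layout, and only a sketch (the names below, like `MiniPool`, are invented for illustration; the real pool in src/InternPool.zig uses `std.MultiArrayList` and many more tags), a miniature pool can decode a key from `items` and `extra` alone, with no recursion through other indices:

// Illustrative sketch only: a miniature intern pool with the same
// (tag, data) + extra encoding. Not the real src/InternPool.zig API.
const std = @import("std");

const MiniPool = struct {
    const Tag = enum(u8) { int_u32, ptr_base };

    // The real pool stores items in a std.MultiArrayList; a plain
    // struct keeps this sketch short.
    const Item = struct { tag: Tag, data: u32 };

    const Key = union(enum) {
        int_u32: u32,
        // Mirrors the PtrBase encoding above: `data` is an index into
        // `extra` where {ty, base} are stored as two consecutive words.
        ptr_base: struct { ty: u32, base: u32 },
    };

    items: std.ArrayListUnmanaged(Item) = .{},
    extra: std.ArrayListUnmanaged(u32) = .{},

    fn indexToKey(pool: *const MiniPool, index: u32) Key {
        const item = pool.items.items[index];
        return switch (item.tag) {
            // Small payloads fit directly in `data`.
            .int_u32 => .{ .int_u32 = item.data },
            // Wide payloads decode straight out of `extra`; no call
            // back into indexToKey is needed.
            .ptr_base => .{ .ptr_base = .{
                .ty = pool.extra.items[item.data],
                .base = pool.extra.items[item.data + 1],
            } },
        };
    }
};

test "decode a wide payload without recursion" {
    const gpa = std.testing.allocator;
    var pool: MiniPool = .{};
    defer pool.items.deinit(gpa);
    defer pool.extra.deinit(gpa);

    // Append the {ty, base} words, then an item pointing at them.
    try pool.extra.appendSlice(gpa, &.{ 7, 42 });
    try pool.items.append(gpa, .{ .tag = .ptr_base, .data = 0 });

    const key = pool.indexToKey(0);
    try std.testing.expectEqual(@as(u32, 7), key.ptr_base.ty);
    try std.testing.expectEqual(@as(u32, 42), key.ptr_base.base);
}

That straight-line decoding is the property the two `indexToKey` patches later in this series restore for `type_slice`, `ptr_elem`, and `ptr_field`: per their commit messages, recursion in such a hot function makes it harder to profile and likely harder to optimize.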
diff --git a/src/value.zig b/src/value.zig index 0da562693756..a1f0f6818777 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1857,7 +1857,8 @@ pub const Value = struct { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)) .toValue().elemValue(mod, index), - .int, .eu_payload, .opt_payload => unreachable, + .int, .eu_payload => unreachable, + .opt_payload => |base| base.toValue().elemValue(mod, index), .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| { @@ -1866,6 +1867,7 @@ pub const Value = struct { return field_val.elemValue(mod, index); } else unreachable, }, + .opt => |opt| opt.val.toValue().elemValue(mod, index), .aggregate => |aggregate| { const len = mod.intern_pool.aggregateTypeLen(aggregate.ty); if (index < len) return switch (aggregate.storage) { From a702af062bb65673ba554dba330b4c5ca8d50f3e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 08:21:47 -0400 Subject: [PATCH 123/205] x86_64: fix InternPool regressions --- src/Sema.zig | 10 +++- src/arch/x86_64/CodeGen.zig | 5 +- src/codegen.zig | 94 ++++++++++++++++++++----------------- 3 files changed, 61 insertions(+), 48 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index bd3cfad50d29..81e2c6e2ae71 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -24311,7 +24311,10 @@ fn fieldVal( .inferred_error_set_type => { return sema.fail(block, src, "TODO handle inferred error sets here", .{}); }, - .simple_type => |t| assert(t == .anyerror), + .simple_type => |t| { + assert(t == .anyerror); + _ = try mod.getErrorValue(field_name); + }, else => unreachable, } @@ -24529,7 +24532,10 @@ fn fieldPtr( .inferred_error_set_type => { return sema.fail(block, src, "TODO handle inferred error sets here", .{}); }, - .simple_type => |t| assert(t == .anyerror), + .simple_type => |t| { + assert(t == .anyerror); + _ = try mod.getErrorValue(field_name); + }, else => unreachable, } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index fca1b25a1d86..dbb3d977b86c 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2128,10 +2128,7 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live const dies = @truncate(u1, tomb_bits) != 0; tomb_bits >>= 1; if (!dies) continue; - const op_int = @enumToInt(op); - if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); - self.processDeath(op_index); + self.processDeath(Air.refToIndexAllowNone(op) orelse continue); } self.finishAirResult(inst, result); } diff --git a/src/codegen.zig b/src/codegen.zig index 0034e96e35d8..983d895991f9 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -387,22 +387,24 @@ pub fn generateSymbol( }, .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.toIntern())) { .array_type => |array_type| { - var index: u64 = 0; - while (index < array_type.len) : (index += 1) { - switch (aggregate.storage) { - .bytes => |bytes| try code.appendSlice(bytes), - .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ - .ty = array_type.child.toType(), - .val = switch (aggregate.storage) { - .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], - .repeated_elem => |elem| elem, - }.toValue(), - }, code, debug_output, reloc_info)) { - .ok => 
{}, - .fail => |em| return .{ .fail = em }, - }, - } + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => { + var index: u64 = 0; + while (index < array_type.len) : (index += 1) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + } + }, } if (array_type.sentinel != .none) { @@ -416,22 +418,24 @@ pub fn generateSymbol( } }, .vector_type => |vector_type| { - var index: u32 = 0; - while (index < vector_type.len) : (index += 1) { - switch (aggregate.storage) { - .bytes => |bytes| try code.appendSlice(bytes), - .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ - .ty = vector_type.child.toType(), - .val = switch (aggregate.storage) { - .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], - .repeated_elem => |elem| elem, - }.toValue(), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - }, - } + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => { + var index: u64 = 0; + while (index < vector_type.len) : (index += 1) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = vector_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + } + }, } const padding = math.cast(usize, typed_value.ty.abiSize(mod) - @@ -669,7 +673,7 @@ fn lowerParentPtr( mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), ), .field => |field| { - const base_type = mod.intern_pool.typeOf(field.base); + const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.elem_type; return lowerParentPtr( bin_file, src_loc, @@ -688,7 +692,7 @@ fn lowerParentPtr( .struct_type, .anon_struct_type, .union_type, - => @intCast(u32, base_type.toType().childType(mod).structFieldOffset( + => @intCast(u32, base_type.toType().structFieldOffset( @intCast(u32, field.index), mod, )), @@ -989,17 +993,23 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { - const error_type = typed_value.ty.errorUnionSet(mod); + const err_type = typed_value.ty.errorUnionSet(mod); const payload_type = typed_value.ty.errorUnionPayload(mod); - const is_pl = typed_value.val.errorUnionIsPayload(mod); - if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
- const err_val = if (!is_pl) typed_value.val else try mod.intValue(error_type, 0); - return genTypedValue(bin_file, src_loc, .{ - .ty = error_type, - .val = err_val, - }, owner_decl_index); + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).error_union.val) { + .err_name => |err_name| return genTypedValue(bin_file, src_loc, .{ + .ty = err_type, + .val = (try mod.intern(.{ .err = .{ + .ty = err_type.toIntern(), + .name = err_name, + } })).toValue(), + }, owner_decl_index), + .payload => return genTypedValue(bin_file, src_loc, .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, owner_decl_index), + } } }, From 5580a69d714af92883e2031a131c30917162dc15 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 11:32:12 -0400 Subject: [PATCH 124/205] cbe: fix InternPool regressions --- src/Module.zig | 7 +- src/codegen/c.zig | 432 +++++++++++++++++----------------------------- 2 files changed, 164 insertions(+), 275 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 49cf9387a944..22ac947cdfbc 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -707,8 +707,11 @@ pub const Decl = struct { return TypedValue{ .ty = decl.ty, .val = decl.val }; } - pub fn internValue(decl: Decl, mod: *Module) Allocator.Error!InternPool.Index { - return decl.val.intern(decl.ty, mod); + pub fn internValue(decl: *Decl, mod: *Module) Allocator.Error!InternPool.Index { + assert(decl.has_tv); + const ip_index = try decl.val.intern(decl.ty, mod); + decl.val = ip_index.toValue(); + return ip_index; } pub fn isFunction(decl: Decl, mod: *const Module) !bool { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2f65513dcd81..d705d6143ef0 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -560,7 +560,7 @@ pub const DeclGen = struct { // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug // somewhere and we should let the C compiler tell us about it. - const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.eql(decl.ty, mod); + const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.childType(mod).eql(decl.ty, mod); if (need_typecast) { try writer.writeAll("(("); try dg.renderType(writer, ty); @@ -581,6 +581,7 @@ pub const DeclGen = struct { ) error{ OutOfMemory, AnalysisFail }!void { const mod = dg.module; const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType(); + const ptr_cty = try dg.typeToIndex(ptr_ty, .complete); const ptr = mod.intern_pool.indexToKey(ptr_val).ptr; switch (ptr.addr) { .decl, .mut_decl => try dg.renderDeclValue( @@ -598,22 +599,66 @@ pub const DeclGen = struct { try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), }), .eu_payload, .opt_payload => |base| { - const base_ty = mod.intern_pool.typeOf(base).toType().childType(mod); + const ptr_base_ty = mod.intern_pool.typeOf(base).toType(); + const base_ty = ptr_base_ty.childType(mod); // Ensure complete type definition is visible before accessing fields. 
_ = try dg.typeToIndex(base_ty, .complete); + const payload_ty = switch (ptr.addr) { + .eu_payload => base_ty.errorUnionPayload(mod), + .opt_payload => base_ty.optionalChild(mod), + else => unreachable, + }; + const ptr_payload_ty = try mod.adjustPtrTypeChild(ptr_base_ty, payload_ty); + const ptr_payload_cty = try dg.typeToIndex(ptr_payload_ty, .complete); + if (ptr_cty != ptr_payload_cty) { + try writer.writeByte('('); + try dg.renderCType(writer, ptr_cty); + try writer.writeByte(')'); + } try writer.writeAll("&("); try dg.renderParentPtr(writer, base, location); try writer.writeAll(")->payload"); }, .elem => |elem| { + const ptr_base_ty = mod.intern_pool.typeOf(elem.base).toType(); + const elem_ty = ptr_base_ty.elemType2(mod); + const ptr_elem_ty = try mod.adjustPtrTypeChild(ptr_base_ty, elem_ty); + const ptr_elem_cty = try dg.typeToIndex(ptr_elem_ty, .complete); + if (ptr_cty != ptr_elem_cty) { + try writer.writeByte('('); + try dg.renderCType(writer, ptr_cty); + try writer.writeByte(')'); + } try writer.writeAll("&("); + if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.size == .One) + try writer.writeByte('*'); try dg.renderParentPtr(writer, elem.base, location); try writer.print(")[{d}]", .{elem.index}); }, .field => |field| { - const base_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); + const ptr_base_ty = mod.intern_pool.typeOf(field.base).toType(); + const base_ty = ptr_base_ty.childType(mod); // Ensure complete type definition is visible before accessing fields. _ = try dg.typeToIndex(base_ty, .complete); + const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) { + .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(field.index, mod), + .ptr_type => |ptr_type| switch (ptr_type.size) { + .One, .Many, .C => unreachable, + .Slice => switch (field.index) { + Value.slice_ptr_index => base_ty.slicePtrFieldType(mod), + Value.slice_len_index => Type.usize, + else => unreachable, + }, + }, + else => unreachable, + }; + const ptr_field_ty = try mod.adjustPtrTypeChild(ptr_base_ty, field_ty); + const ptr_field_cty = try dg.typeToIndex(ptr_field_ty, .complete); + if (ptr_cty != ptr_field_cty) { + try writer.writeByte('('); + try dg.renderCType(writer, ptr_cty); + try writer.writeByte(')'); + } switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) { .begin => try dg.renderParentPtr(writer, field.base, location), .field => |name| { @@ -861,234 +906,6 @@ pub const DeclGen = struct { unreachable; } - if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { - .Array, .Vector => { - if (location == .FunctionArgument) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - // First try specific tag representations for more efficiency. - switch (val.toIntern()) { - .undef => { - const ai = ty.arrayInfo(mod); - try writer.writeByte('{'); - if (ai.sentinel) |s| { - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } else { - try writer.writeByte('0'); - } - try writer.writeByte('}'); - return; - }, - .empty_struct => { - const ai = ty.arrayInfo(mod); - try writer.writeByte('{'); - if (ai.sentinel) |s| { - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } else { - try writer.writeByte('0'); - } - try writer.writeByte('}'); - return; - }, - else => {}, - } - // Fall back to generic implementation. 
- - // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal - const max_string_initializer_len = 65535; - - const ai = ty.arrayInfo(mod); - if (ai.elem_type.eql(Type.u8, mod)) { - if (ai.len <= max_string_initializer_len) { - var literal = stringLiteral(writer); - try literal.start(); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); - try literal.writeChar(elem_val_u8); - } - if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); - if (s_u8 != 0) try literal.writeChar(s_u8); - } - try literal.end(); - } else { - try writer.writeByte('{'); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); - try writer.print("'\\x{x}'", .{elem_val_u8}); - } - if (ai.sentinel) |s| { - if (index != 0) try writer.writeByte(','); - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } - try writer.writeByte('}'); - } - } else { - try writer.writeByte('{'); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(mod, index); - try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); - } - if (ai.sentinel) |s| { - if (index != 0) try writer.writeByte(','); - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } - try writer.writeByte('}'); - } - }, - .Struct => switch (ty.containerLayout(mod)) { - .Auto, .Extern => { - const field_vals = val.castTag(.aggregate).?.data; - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - try writer.writeByte('{'); - var empty = true; - for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i, mod)) continue; - const field_ty = ty.structFieldType(field_i, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - if (!empty) try writer.writeByte(','); - try dg.renderValue(writer, field_ty, field_val, initializer_type); - - empty = false; - } - try writer.writeByte('}'); - }, - .Packed => { - const field_vals = val.castTag(.aggregate).?.data; - const int_info = ty.intInfo(mod); - - const bits = Type.smallestUnsignedBits(int_info.bits - 1); - const bit_offset_ty = try mod.intType(.unsigned, bits); - - var bit_offset: u64 = 0; - - var eff_num_fields: usize = 0; - for (0..field_vals.len) |field_i| { - if (ty.structFieldIsComptime(field_i, mod)) continue; - const field_ty = ty.structFieldType(field_i, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - eff_num_fields += 1; - } - - if (eff_num_fields == 0) { - try writer.writeByte('('); - try dg.renderValue(writer, ty, Value.undef, initializer_type); - try writer.writeByte(')'); - } else if (ty.bitSize(mod) > 64) { - // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) - var num_or = eff_num_fields - 1; - while (num_or > 0) : (num_or -= 1) { - try writer.writeAll("zig_or_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - } - - var eff_index: usize = 0; - var needs_closing_paren = false; - for (field_vals, 0..) 
|field_val, field_i| { - if (ty.structFieldIsComptime(field_i, mod)) continue; - const field_ty = ty.structFieldType(field_i, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - const cast_context = IntCastContext{ .value = .{ .value = field_val } }; - if (bit_offset != 0) { - try writer.writeAll("zig_shl_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); - try writer.writeAll(", "); - const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - try writer.writeByte(')'); - } else { - try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); - } - - if (needs_closing_paren) try writer.writeByte(')'); - if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - - bit_offset += field_ty.bitSize(mod); - needs_closing_paren = true; - eff_index += 1; - } - } else { - try writer.writeByte('('); - // a << a_off | b << b_off | c << c_off - var empty = true; - for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i, mod)) continue; - const field_ty = ty.structFieldType(field_i, mod); - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - if (!empty) try writer.writeAll(" | "); - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - - if (bit_offset != 0) { - try dg.renderValue(writer, field_ty, field_val, .Other); - try writer.writeAll(" << "); - const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - } else { - try dg.renderValue(writer, field_ty, field_val, .Other); - } - - bit_offset += field_ty.bitSize(mod); - empty = false; - } - try writer.writeByte(')'); - } - }, - }, - - .Frame, - .AnyFrame, - => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ - @tagName(tag), - }), - - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - switch (mod.intern_pool.indexToKey(val.ip_index)) { // types, not values .int_type, @@ -1144,10 +961,24 @@ pub const DeclGen = struct { .error_union => |error_union| { const payload_ty = ty.errorUnionPayload(mod); const error_ty = ty.errorUnionSet(mod); - const error_val = if (val.errorUnionIsPayload(mod)) try mod.intValue(Type.err_int, 0) else val; - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return dg.renderValue(writer, Type.err_int, error_val, location); + switch (error_union.val) { + .err_name => |err_name| return dg.renderValue( + writer, + error_ty, + (try mod.intern(.{ .err = .{ + .ty = error_ty.toIntern(), + .name = err_name, + } })).toValue(), + location, + ), + .payload => return dg.renderValue( + writer, + Type.err_int, + try mod.intValue(Type.err_int, 0), + location, + ), + } } if (!location.isInitializer()) { @@ -1156,15 +987,34 @@ pub const DeclGen = struct { try writer.writeByte(')'); } - const payload_val = switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), - .payload => |payload| payload, - }.toValue(); - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); + try 
dg.renderValue( + writer, + payload_ty, + switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(), + initializer_type, + ); try writer.writeAll(", .error = "); - try dg.renderValue(writer, error_ty, error_val, initializer_type); + switch (error_union.val) { + .err_name => |err_name| try dg.renderValue( + writer, + error_ty, + (try mod.intern(.{ .err = .{ + .ty = error_ty.toIntern(), + .name = err_name, + } })).toValue(), + location, + ), + .payload => try dg.renderValue( + writer, + Type.err_int, + try mod.intValue(Type.err_int, 0), + location, + ), + } try writer.writeAll(" }"); }, .enum_tag => { @@ -1272,30 +1122,42 @@ pub const DeclGen = struct { } try writer.writeByte('{'); } + const ptr_location = switch (ptr.len) { + .none => location, + else => initializer_type, + }; + const ptr_ty = switch (ptr.len) { + .none => ty, + else => ty.slicePtrFieldType(mod), + }; + const ptr_val = switch (ptr.len) { + .none => val, + else => val.slicePtr(mod), + }; switch (ptr.addr) { .decl, .mut_decl => try dg.renderDeclValue( writer, - ty, - val, + ptr_ty, + ptr_val, switch (ptr.addr) { .decl => |decl| decl, .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }, - location, + ptr_location, ), .int => |int| { try writer.writeAll("(("); - try dg.renderType(writer, ty); + try dg.renderType(writer, ptr_ty); try writer.print("){x})", .{ - try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), + try dg.fmtIntLiteral(Type.usize, int.toValue(), ptr_location), }); }, .eu_payload, .opt_payload, .elem, .field, - => try dg.renderParentPtr(writer, val.ip_index, location), + => try dg.renderParentPtr(writer, ptr_val.ip_index, ptr_location), .comptime_field => unreachable, } if (ptr.len != .none) { @@ -1311,10 +1173,19 @@ pub const DeclGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return dg.renderValue(writer, Type.bool, is_null_val, location); - if (ty.optionalReprIsPayload(mod)) switch (opt.val) { - .none => return writer.writeByte('0'), - else => |payload| return dg.renderValue(writer, payload_ty, payload.toValue(), location), - }; + if (ty.optionalReprIsPayload(mod)) return dg.renderValue( + writer, + payload_ty, + switch (opt.val) { + .none => switch (payload_ty.zigTypeTag(mod)) { + .ErrorSet => try mod.intValue(Type.err_int, 0), + .Pointer => try mod.getCoerced(val, payload_ty), + else => unreachable, + }, + else => |payload| payload.toValue(), + }, + location, + ); if (!location.isInitializer()) { try writer.writeByte('('); @@ -2535,7 +2406,7 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll("enum {\n"); o.indent_writer.pushIndent(); var max_name_len: usize = 0; - for (mod.error_name_list.items, 0..) |name, value| { + for (mod.error_name_list.items[1..], 1..) 
|name, value| { max_name_len = std.math.max(name.len, max_name_len); const err_val = try mod.intern(.{ .err = .{ .ty = .anyerror_type, @@ -2562,21 +2433,21 @@ pub fn genErrDecls(o: *Object) !void { .child = .u8_type, .sentinel = .zero_u8, }); - - var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; - const name_val = Value.initPayload(&name_pl.base); + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = name }, + } }); try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_ty, .{ .identifier = identifier }, Const, 0, .complete); try writer.writeAll(" = "); - try o.dg.renderValue(writer, name_ty, name_val, .StaticInitializer); + try o.dg.renderValue(writer, name_ty, name_val.toValue(), .StaticInitializer); try writer.writeAll(";\n"); } const name_array_ty = try mod.arrayType(.{ .len = mod.error_name_list.items.len, .child = .slice_const_u8_sentinel_0_type, - .sentinel = .zero_u8, }); try writer.writeAll("static "); @@ -2588,7 +2459,7 @@ pub fn genErrDecls(o: *Object) !void { const len_val = try mod.intValue(Type.usize, name.len); try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{ - fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other), + fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .StaticInitializer), }); } try writer.writeAll("};\n"); @@ -2642,10 +2513,10 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { .child = .u8_type, .sentinel = .zero_u8, }); - - var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; - const name_val = Value.initPayload(&name_pl.base); - + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = name }, + } }); const len_val = try mod.intValue(Type.usize, name.len); try w.print(" case {}: {{\n static ", .{ @@ -2653,7 +2524,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { }); try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, 0, .complete); try w.writeAll(" = "); - try o.dg.renderValue(w, name_ty, name_val, .Initializer); + try o.dg.renderValue(w, name_ty, name_val.toValue(), .Initializer); try w.writeAll(";\n return ("); try o.dg.renderType(w, name_slice_ty); try w.print("){{{}, {}}};\n", .{ @@ -2789,7 +2660,7 @@ pub fn genDecl(o: *Object) !void { const mod = o.dg.module; const decl = o.dg.decl.?; const decl_c_value = .{ .decl = o.dg.decl_index.unwrap().? 
}; - const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; + const tv: TypedValue = .{ .ty = decl.ty, .val = (try decl.internValue(mod)).toValue() }; if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; if (tv.val.getExternFunc(mod)) |_| { @@ -4771,6 +4642,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(") "); try genBodyResolveState(f, inst, liveness_condbr.then_deaths, then_body, false); + try writer.writeByte('\n'); // We don't need to use `genBodyResolveState` for the else block, because this instruction is // noreturn so must terminate a body, therefore we don't need to leave `value_map` or @@ -5165,7 +5037,7 @@ fn airIsNull( TypedValue{ .ty = Type.bool, .val = Value.true } else if (optional_ty.isPtrLikeOptional(mod)) // operand is a regular pointer, test `operand !=/== NULL` - TypedValue{ .ty = optional_ty, .val = try mod.nullValue(optional_ty) } + TypedValue{ .ty = optional_ty, .val = try mod.getCoerced(Value.null, optional_ty) } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) TypedValue{ .ty = Type.err_int, .val = try mod.intValue(Type.err_int, 0) } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { @@ -5778,7 +5650,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); try f.writeCValueDeref(writer, operand); try writer.writeAll(".error = "); - try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); + try f.object.dg.renderValue(writer, Type.err_int, try mod.intValue(Type.err_int, 0), .Other); try writer.writeAll(";\n"); // Then return the payload pointer (only if it is used) @@ -6760,27 +6632,41 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(" = "); try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { - .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), + .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) { + .Bool => Value.false, + .Int => try mod.intValue(scalar_ty, 0), + else => unreachable, + }, .And => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => try mod.intValue(Type.comptime_int, 1), - else => switch (scalar_ty.intInfo(mod).signedness) { + .Bool => Value.true, + .Int => switch (scalar_ty.intInfo(mod).signedness) { .unsigned => try scalar_ty.maxIntScalar(mod, scalar_ty), .signed => try mod.intValue(scalar_ty, -1), }, + else => unreachable, + }, + .Add => switch (scalar_ty.zigTypeTag(mod)) { + .Int => try mod.intValue(scalar_ty, 0), + .Float => try mod.floatValue(scalar_ty, 0.0), + else => unreachable, + }, + .Mul => switch (scalar_ty.zigTypeTag(mod)) { + .Int => try mod.intValue(scalar_ty, 1), + .Float => try mod.floatValue(scalar_ty, 1.0), + else => unreachable, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one_comptime_int, + .Bool => Value.true, .Int => try scalar_ty.maxIntScalar(mod, scalar_ty), .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => try mod.intValue(scalar_ty, 0), - .Int => try scalar_ty.minInt(mod, scalar_ty), + .Bool => Value.false, + .Int => try scalar_ty.minIntScalar(mod, scalar_ty), .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, - .Mul => try mod.intValue(Type.comptime_int, 1), }, .Initializer); try writer.writeAll(";\n"); From 804740af4ced8389e21df31a908c4212e32a477a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 29 May 2023 13:47:13 -0700 Subject: [PATCH 125/205] 
InternPool: avoid indexToKey recursion for type_slice This is a hot function, and recursion makes it more difficult to profile, as well as likely making it more difficult to optimize. --- src/InternPool.zig | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 7bfecf46b6c7..008f0f4df11b 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2455,10 +2455,21 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .type_slice => { - const ptr_type_index = @intToEnum(Index, data); - var result = ip.indexToKey(ptr_type_index).ptr_type; - result.size = .Slice; - return .{ .ptr_type = result }; + assert(ip.items.items(.tag)[data] == .type_pointer); + const ptr_info = ip.extraData(Pointer, ip.items.items(.data)[data]); + return .{ .ptr_type = .{ + .elem_type = ptr_info.child, + .sentinel = ptr_info.sentinel, + .alignment = ptr_info.flags.alignment, + .size = .Slice, + .is_const = ptr_info.flags.is_const, + .is_volatile = ptr_info.flags.is_volatile, + .is_allowzero = ptr_info.flags.is_allowzero, + .address_space = ptr_info.flags.address_space, + .vector_index = ptr_info.flags.vector_index, + .host_size = ptr_info.packed_offset.host_size, + .bit_offset = ptr_info.packed_offset.bit_offset, + } }; }, .type_optional => .{ .opt_type = @intToEnum(Index, data) }, From b336866fbc1edd4c999d3cd5d62ae7230d176fa7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 29 May 2023 15:30:56 -0700 Subject: [PATCH 126/205] InternPool: avoid indexToKey recursion for ptr_elem,ptr_field This is a hot function, and recursion makes it more difficult to profile, as well as likely making it more difficult to optimize. --- src/InternPool.zig | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 008f0f4df11b..f1acbdfebfdc 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2625,24 +2625,36 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .ptr_elem => { + // Avoid `indexToKey` recursion by asserting the tag encoding. const info = ip.extraData(PtrBaseIndex, data); - return .{ .ptr = .{ - .ty = info.ty, - .addr = .{ .elem = .{ - .base = info.base, - .index = ip.indexToKey(info.index).int.storage.u64, + const index_item = ip.items.get(@enumToInt(info.index)); + return switch (index_item.tag) { + .int_usize => .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .elem = .{ + .base = info.base, + .index = index_item.data, + } }, } }, - } }; + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; }, .ptr_field => { + // Avoid `indexToKey` recursion by asserting the tag encoding. 
const info = ip.extraData(PtrBaseIndex, data); - return .{ .ptr = .{ - .ty = info.ty, - .addr = .{ .field = .{ - .base = info.base, - .index = ip.indexToKey(info.index).int.storage.u64, + const index_item = ip.items.get(@enumToInt(info.index)); + return switch (index_item.tag) { + .int_usize => .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .field = .{ + .base = info.base, + .index = index_item.data, + } }, } }, - } }; + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; }, .ptr_slice => { const info = ip.extraData(PtrSlice, data); From f2778f7ca07cdfd599c65185dbcc6a648740fd5d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 29 May 2023 16:52:21 -0700 Subject: [PATCH 127/205] InternPool: avoid indexToKey recursion for only_possible_value This is a hot function, and recursion makes it more difficult to profile, as well as likely making it more difficult to optimize. --- src/InternPool.zig | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index f1acbdfebfdc..6e442d9ae5a5 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2765,8 +2765,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .func => .{ .func = ip.extraData(Key.Func, data) }, .only_possible_value => { const ty = @intToEnum(Index, data); - return switch (ip.indexToKey(ty)) { - .array_type, .vector_type => .{ .aggregate = .{ + const ty_item = ip.items.get(@enumToInt(ty)); + return switch (ty_item.tag) { + .type_array_big, .type_array_small, .type_vector => .{ .aggregate = .{ .ty = ty, .storage = .{ .elems = &.{} }, } }, @@ -2775,16 +2776,23 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { // have a slice of comptime values that can be used here for when // the struct has one possible value due to all fields comptime (same // as the tuple case below). - .struct_type => .{ .aggregate = .{ + .type_struct, .type_struct_ns => .{ .aggregate = .{ .ty = ty, .storage = .{ .elems = &.{} }, } }, + // There is only one possible value precisely due to the // fact that this values slice is fully populated! - .anon_struct_type => |anon_struct_type| .{ .aggregate = .{ - .ty = ty, - .storage = .{ .elems = anon_struct_type.values }, - } }, + .type_struct_anon, .type_tuple_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, ty_item.data); + const fields_len = type_struct_anon.data.fields_len; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + return .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = @ptrCast([]const Index, values) }, + } }; + }, + else => unreachable, }; }, From f7177fb8215714f9f22e3f9da0d3c7d3ad58d390 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 29 May 2023 17:18:23 -0700 Subject: [PATCH 128/205] InternPool: avoid indexToKey recursion for opt_payload This is a hot function, and recursion makes it more difficult to profile, as well as likely making it more difficult to optimize. Previously, indexToKey for opt_payload would call getAssumeExists() on the optional type. This made it possible to omit the optional type in the encoding of opt_payload. However, getAssumeExists() *must* call indexToKey because of hashing/equality. So, this commit adds the optional type to the opt_payload encoding, which increases its "extra" size from 0 to 8 bytes. As a result, the opt_payload encoding went from not showing up on the top 25 largest tags to...still not showing up in the top 25 largest tags. 
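To make the new encoding concrete, here is a minimal, self-contained sketch of
the idea. The `MiniPool`, `addTypeValue`, and `optPayloadType` names are
illustrative stand-ins, not the compiler's actual declarations; the point is
only that once the (type, value) pair lives in the 32-bit `extra` array,
recovering the optional type is a plain array load, with no hashing and no
recursive indexToKey() call:

    const std = @import("std");

    const Index = enum(u32) { none = std.math.maxInt(u32), _ };

    const TypeValue = struct {
        ty: Index, // the optional type, now stored explicitly (+8 bytes of extra)
        val: Index, // the interned non-null payload value
    };

    const MiniPool = struct {
        extra: std.ArrayListUnmanaged(u32) = .{},

        fn addTypeValue(ip: *MiniPool, gpa: std.mem.Allocator, tv: TypeValue) !u32 {
            const extra_index = @intCast(u32, ip.extra.items.len);
            try ip.extra.appendSlice(gpa, &.{ @enumToInt(tv.ty), @enumToInt(tv.val) });
            return extra_index;
        }

        // The typeOf() fast path for an opt_payload item: a plain load of
        // extra[data + 0], instead of a getAssumeExists() -> indexToKey()
        // round trip through the payload value.
        fn optPayloadType(ip: MiniPool, data: u32) Index {
            return @intToEnum(Index, ip.extra.items[data]);
        }
    };

    test "opt_payload type lookup needs no recursion" {
        const gpa = std.testing.allocator;
        var ip: MiniPool = .{};
        defer ip.extra.deinit(gpa);

        const data = try ip.addTypeValue(gpa, .{
            .ty = @intToEnum(Index, 7), // stand-in for an interned `?u32` type
            .val = @intToEnum(Index, 9), // stand-in for the interned payload
        });
        try std.testing.expectEqual(@intToEnum(Index, 7), ip.optPayloadType(data));
    }
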
This also helps make InternPool.typeOf() no longer need to call indexToKey which is another hot function and another source of recursion. --- src/InternPool.zig | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 6e442d9ae5a5..5230f0fffc9f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1383,7 +1383,7 @@ pub const Index = enum(u32) { ptr_elem: struct { data: *PtrBaseIndex }, ptr_field: struct { data: *PtrBaseIndex }, ptr_slice: struct { data: *PtrSlice }, - opt_payload: DataIsIndex, + opt_payload: struct { data: *TypeValue }, opt_null: DataIsIndex, int_u8: struct { data: u8 }, int_u16: struct { data: u16 }, @@ -1807,9 +1807,8 @@ pub const Tag = enum(u8) { /// already contains the slice type corresponding to this payload. ptr_slice, /// An optional value that is non-null. - /// data is Index of the payload value. - /// In order to use this encoding, one must ensure that the `InternPool` - /// already contains the optional type corresponding to this payload. + /// data is extra index of `TypeValue`. + /// The type is the optional type (not the payload type). opt_payload, /// An optional value that is null. /// data is Index of the optional type. @@ -2577,15 +2576,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .val = .none, } }, .opt_payload => { - const payload_val = @intToEnum(Index, data); - // The existence of `opt_payload` guarantees that the optional type will be - // stored in the `InternPool`. - const opt_ty = ip.getAssumeExists(.{ - .opt_type = ip.typeOf(payload_val), - }); + const extra = ip.extraData(TypeValue, data); return .{ .opt = .{ - .ty = opt_ty, - .val = payload_val, + .ty = extra.ty, + .val = extra.val, } }; }, .ptr_decl => { @@ -3375,7 +3369,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(opt.ty), } else .{ .tag = .opt_payload, - .data = @enumToInt(opt.val), + .data = try ip.addExtra(gpa, TypeValue{ + .ty = opt.ty, + .val = opt.val, + }), }); }, @@ -4800,7 +4797,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .ptr_field => @sizeOf(PtrBaseIndex), .ptr_slice => @sizeOf(PtrSlice), .opt_null => 0, - .opt_payload => 0, + .opt_payload => @sizeOf(TypeValue), .int_u8 => 0, .int_u16 => 0, .int_u32 => 0, From 55cda9a592dd5aa030d1134351394e1014fefed1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 29 May 2023 18:14:16 -0700 Subject: [PATCH 129/205] InternPool: avoid indexToKey recursion for ptr_slice Recursion makes this hot function more difficult to profile and optimize. The ptr_slice encoding now additionally includes the slice type. This makes typeOf() implementable without indexToKey() as well as no longer using recursion in the ptr_slice prong of indexToKey itself. Unfortunately some logic had to be duplicated. However, I think that a future enhancement could eliminate the duplication as well as remove some other unwanted code, improving performance, by representing a slice value in `Key.Ptr` without `addr` populated directly, but with an `Index` pointing to the underlying manyptr value. --- src/InternPool.zig | 100 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 82 insertions(+), 18 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 5230f0fffc9f..f8b71ffc0691 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1803,8 +1803,6 @@ pub const Tag = enum(u8) { ptr_field, /// A slice. 
/// data is extra index of PtrSlice, which contains the ptr and len values - /// In order to use this encoding, one must ensure that the `InternPool` - /// already contains the slice type corresponding to this payload. ptr_slice, /// An optional value that is non-null. /// data is extra index of `TypeValue`. @@ -2236,7 +2234,11 @@ pub const PtrBaseIndex = struct { }; pub const PtrSlice = struct { + /// The slice type. + ty: Index, + /// A many pointer value. ptr: Index, + /// A usize value. len: Index, }; @@ -2606,16 +2608,25 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .addr = .{ .comptime_field = info.field_val }, } }; }, - .ptr_int, .ptr_eu_payload, .ptr_opt_payload => { + .ptr_int => { const info = ip.extraData(PtrBase, data); return .{ .ptr = .{ .ty = info.ty, - .addr = switch (item.tag) { - .ptr_int => .{ .int = info.base }, - .ptr_eu_payload => .{ .eu_payload = info.base }, - .ptr_opt_payload => .{ .opt_payload = info.base }, - else => unreachable, - }, + .addr = .{ .int = info.base }, + } }; + }, + .ptr_eu_payload => { + const info = ip.extraData(PtrBase, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .eu_payload = info.base }, + } }; + }, + .ptr_opt_payload => { + const info = ip.extraData(PtrBase, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .opt_payload = info.base }, } }; }, .ptr_elem => { @@ -2652,15 +2663,64 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .ptr_slice => { const info = ip.extraData(PtrSlice, data); - const ptr = ip.indexToKey(info.ptr).ptr; - var ptr_type = ip.indexToKey(ptr.ty).ptr_type; - assert(ptr_type.size == .Many); - ptr_type.size = .Slice; - return .{ .ptr = .{ - .ty = ip.getAssumeExists(.{ .ptr_type = ptr_type }), - .addr = ptr.addr, - .len = info.len, - } }; + const ptr_item = ip.items.get(@enumToInt(info.ptr)); + return .{ + .ptr = .{ + .ty = info.ty, + .addr = switch (ptr_item.tag) { + .ptr_decl => .{ + .decl = ip.extraData(PtrDecl, ptr_item.data).decl, + }, + .ptr_mut_decl => b: { + const sub_info = ip.extraData(PtrMutDecl, ptr_item.data); + break :b .{ .mut_decl = .{ + .decl = sub_info.decl, + .runtime_index = sub_info.runtime_index, + } }; + }, + .ptr_comptime_field => .{ + .comptime_field = ip.extraData(PtrComptimeField, ptr_item.data).field_val, + }, + .ptr_int => .{ + .int = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_eu_payload => .{ + .eu_payload = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_opt_payload => .{ + .opt_payload = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_elem => b: { + // Avoid `indexToKey` recursion by asserting the tag encoding. + const sub_info = ip.extraData(PtrBaseIndex, ptr_item.data); + const index_item = ip.items.get(@enumToInt(sub_info.index)); + break :b switch (index_item.tag) { + .int_usize => .{ .elem = .{ + .base = sub_info.base, + .index = index_item.data, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + .ptr_field => b: { + // Avoid `indexToKey` recursion by asserting the tag encoding. 
+ const sub_info = ip.extraData(PtrBaseIndex, ptr_item.data); + const index_item = ip.items.get(@enumToInt(sub_info.index)); + break :b switch (index_item.tag) { + .int_usize => .{ .field = .{ + .base = sub_info.base, + .index = index_item.data, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + else => unreachable, + }, + .len = info.len, + }, + }; }, .int_u8 => .{ .int = .{ .ty = .u8_type, @@ -3340,6 +3400,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, else => { + // TODO: change Key.Ptr for slices to reference the manyptr value + // rather than having an addr field directly. Then we can avoid + // these problematic calls to pop(), get(), and getOrPutAdapted(). assert(ptr_type.size == .Slice); _ = ip.map.pop(); var new_key = key; @@ -3352,6 +3415,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { ip.items.appendAssumeCapacity(.{ .tag = .ptr_slice, .data = try ip.addExtra(gpa, PtrSlice{ + .ty = ptr.ty, .ptr = ptr_index, .len = ptr.len, }), From 66ae42bb72a9ad4b1cd44b32fa5be322b07a5ffb Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 20:52:53 -0400 Subject: [PATCH 130/205] Sema: fix pointer arithmetic on single array pointers --- src/InternPool.zig | 48 ++++++++++++++++++++++++---------------------- src/Sema.zig | 41 ++++++++++++++++++++++++++++----------- src/value.zig | 5 ++++- 3 files changed, 59 insertions(+), 35 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index f8b71ffc0691..1cb2704054d7 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3352,30 +3352,32 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .elem, .field => |base_index| { const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; - switch (base_ptr_type.size) { - .One => switch (ip.indexToKey(base_ptr_type.elem_type)) { - .array_type, .vector_type => assert(ptr.addr == .elem), - .anon_struct_type => |anon_struct_type| { - assert(ptr.addr == .field); - assert(base_index.index < anon_struct_type.types.len); - }, - .struct_type => |struct_type| { - assert(ptr.addr == .field); - assert(base_index.index < ip.structPtrUnwrapConst(struct_type.index).?.fields.count()); - }, - .union_type => |union_type| { - assert(ptr.addr == .field); - assert(base_index.index < ip.unionPtrConst(union_type.index).fields.count()); - }, - .ptr_type => |slice_type| { - assert(ptr.addr == .field); - assert(slice_type.size == .Slice); - assert(base_index.index < 2); - }, - else => unreachable, + switch (ptr.addr) { + .elem => assert(base_ptr_type.size == .Many), + .field => { + assert(base_ptr_type.size == .One); + switch (ip.indexToKey(base_ptr_type.elem_type)) { + .anon_struct_type => |anon_struct_type| { + assert(ptr.addr == .field); + assert(base_index.index < anon_struct_type.types.len); + }, + .struct_type => |struct_type| { + assert(ptr.addr == .field); + assert(base_index.index < ip.structPtrUnwrapConst(struct_type.index).?.fields.count()); + }, + .union_type => |union_type| { + assert(ptr.addr == .field); + assert(base_index.index < ip.unionPtrConst(union_type.index).fields.count()); + }, + .ptr_type => |slice_type| { + assert(ptr.addr == .field); + assert(slice_type.size == .Slice); + assert(base_index.index < 2); + }, + else => unreachable, + } }, - .Many => assert(ptr.addr == .elem), - .Slice, .C => unreachable, + else => unreachable, } _ = ip.map.pop(); const index_index = try ip.get(gpa, .{ .int = 
.{ diff --git a/src/Sema.zig b/src/Sema.zig index 81e2c6e2ae71..452efc8583cb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -15036,10 +15036,7 @@ fn analyzePtrArithmetic( const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset); const ptr_ty = sema.typeOf(ptr); const ptr_info = ptr_ty.ptrInfo(mod); - const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array) - ptr_info.pointee_type.childType(mod) - else - ptr_info.pointee_type; + assert(ptr_info.size == .Many or ptr_info.size == .C); const new_ptr_ty = t: { // Calculate the new pointer alignment. @@ -15050,7 +15047,7 @@ fn analyzePtrArithmetic( } // If the addend is not a comptime-known value we can still count on // it being a multiple of the type size. - const elem_size = elem_ty.abiSize(mod); + const elem_size = ptr_info.pointee_type.abiSize(mod); const addend = if (opt_off_val) |off_val| a: { const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod)); break :a elem_size * off_int; @@ -15081,7 +15078,7 @@ fn analyzePtrArithmetic( const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod)); if (offset_int == 0) return ptr; if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| { - const elem_size = elem_ty.abiSize(mod); + const elem_size = ptr_info.pointee_type.abiSize(mod); const new_addr = switch (air_tag) { .ptr_add => addr + elem_size * offset_int, .ptr_sub => addr - elem_size * offset_int, @@ -22673,12 +22670,28 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const new_dest_ptr_ty = sema.typeOf(new_dest_ptr); const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty) - else - new_dest_ptr; + else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: { + var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type; + assert(dest_manyptr_ty_key.size == .One); + dest_manyptr_ty_key.elem_type = dest_elem_ty.toIntern(); + dest_manyptr_ty_key.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src); + } else new_dest_ptr; + + const new_src_ptr_ty = sema.typeOf(new_src_ptr); + const raw_src_ptr = if (new_src_ptr_ty.isSlice(mod)) + try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty) + else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: { + var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type; + assert(src_manyptr_ty_key.size == .One); + src_manyptr_ty_key.elem_type = src_elem_ty.toIntern(); + src_manyptr_ty_key.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(src_manyptr_ty_key), new_src_ptr, src_src); + } else new_src_ptr; // ok1: dest >= src + len // ok2: src >= dest + len - const src_plus_len = try sema.analyzePtrArithmetic(block, src, new_src_ptr, len, .ptr_add, src_src, src); + const src_plus_len = try sema.analyzePtrArithmetic(block, src, raw_src_ptr, len, .ptr_add, src_src, src); const dest_plus_len = try sema.analyzePtrArithmetic(block, src, raw_dest_ptr, len, .ptr_add, dest_src, src); const ok1 = try block.addBinOp(.cmp_gte, raw_dest_ptr, src_plus_len); const ok2 = try block.addBinOp(.cmp_gte, new_src_ptr, dest_plus_len); @@ -29968,8 +29981,14 @@ fn analyzeSlice( const ptr = if (slice_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty) - else - ptr_or_slice; + else if (array_ty.zigTypeTag(mod) == .Array) ptr: { + var 
manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type; + assert(manyptr_ty_key.elem_type == array_ty.toIntern()); + assert(manyptr_ty_key.size == .One); + manyptr_ty_key.elem_type = elem_ty.toIntern(); + manyptr_ty_key.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src); + } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src); diff --git a/src/value.zig b/src/value.zig index a1f0f6818777..92dd3a3c3f2b 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2079,10 +2079,13 @@ pub const Value = struct { }, else => val, }; + var ptr_ty_key = mod.intern_pool.indexToKey(elem_ptr_ty.toIntern()).ptr_type; + assert(ptr_ty_key.size != .Slice); + ptr_ty_key.size = .Many; return (try mod.intern(.{ .ptr = .{ .ty = elem_ptr_ty.toIntern(), .addr = .{ .elem = .{ - .base = ptr_val.toIntern(), + .base = (try mod.getCoerced(ptr_val, try mod.ptrType(ptr_ty_key))).toIntern(), .index = index, } }, } })).toValue(); From a803e9cf48e9566638e6ec1e23fe0b885e2651ee Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 21:47:34 -0400 Subject: [PATCH 131/205] Sema: fix vector comparison and interning of -0 --- lib/std/math/big/int.zig | 3 +++ src/Sema.zig | 9 ++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index c4d3ccf0775d..13ead1c42122 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -2158,6 +2158,9 @@ pub const Const = struct { pub fn to(self: Const, comptime T: type) ConvertError!T { switch (@typeInfo(T)) { .Int => |info| { + // Make sure -0 is handled correctly. + if (self.eqZero()) return 0; + const UT = std.meta.Int(.unsigned, info.bits); if (!self.fitsInTwosComp(info.signedness, info.bits)) { diff --git a/src/Sema.zig b/src/Sema.zig index 452efc8583cb..d25b02225450 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -34538,10 +34538,13 @@ fn compareScalar( rhs: Value, ty: Type, ) CompileError!bool { + const mod = sema.mod; + const coerced_lhs = try mod.getCoerced(lhs, ty); + const coerced_rhs = try mod.getCoerced(rhs, ty); switch (op) { - .eq => return sema.valuesEqual(lhs, rhs, ty), - .neq => return !(try sema.valuesEqual(lhs, rhs, ty)), - else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod, sema), + .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), + .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), + else => return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema), } } From 27f1ad8afde86c8f734deeb05f5c8cad910275e4 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 22:22:05 -0400 Subject: [PATCH 132/205] Module: add allowzero canonicalization to pointer types --- src/Module.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Module.zig b/src/Module.zig index 22ac947cdfbc..ffc6a95fe17f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6683,6 +6683,8 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type var canon_info = info; const have_elem_layout = info.elem_type.toType().layoutIsResolved(mod); + if (info.size == .C) canon_info.is_allowzero = true; + // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee // type, we change it to 0 here. 
If this causes an assertion trip because the // pointee type needs to be resolved more, that needs to be done before calling From 66f83f27a2904180bae7797a7c87c6eddc7eebff Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 29 May 2023 22:33:40 -0700 Subject: [PATCH 133/205] InternPool: avoid indexToKey recursion for type_enum_auto Recursion makes this hot function more difficult to profile and optimize. This commit adds the integer tag type to the type_enum_auto encoding even though the integer tag type can be inferred based on the number of fields of the enum. This avoids a call to getAssumeExists of the integer tag type inside indexToKey. --- src/InternPool.zig | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 1cb2704054d7..3fafca0a7451 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2169,6 +2169,9 @@ pub const EnumAuto = struct { decl: Module.Decl.Index, /// This may be `none` if there are no declarations. namespace: Module.Namespace.OptionalIndex, + /// An integer type which is used for the numerical value of the enum, which + /// was inferred by Zig based on the number of tags. + int_tag_type: Index, fields_len: u32, /// Maps field names to declaration index. names_map: MapIndex, @@ -2553,7 +2556,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { return .{ .enum_type = .{ .decl = enum_auto.data.decl, .namespace = enum_auto.data.namespace, - .tag_ty = ip.getEnumIntTagType(enum_auto.data.fields_len), + .tag_ty = enum_auto.data.int_tag_type, .names = names, .values = &.{}, .tag_mode = .auto, @@ -2928,14 +2931,6 @@ fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { }; } -/// Asserts the integer tag type is already present in the InternPool. -fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index { - return ip.getAssumeExists(.{ .int_type = .{ - .bits = if (fields_len == 0) 0 else std.math.log2_int_ceil(u32, fields_len), - .signedness = .unsigned, - } }); -} - fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { const enum_explicit = ip.extraDataTrail(EnumExplicit, data); const names = @ptrCast( @@ -3222,6 +3217,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = ip.addExtraAssumeCapacity(EnumAuto{ .decl = enum_type.decl, .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, .names_map = names_map, .fields_len = fields_len, }), @@ -4000,18 +3996,18 @@ pub fn getIncompleteEnum( } } -pub fn getIncompleteEnumAuto( +fn getIncompleteEnumAuto( ip: *InternPool, gpa: Allocator, enum_type: Key.IncompleteEnumType, ) Allocator.Error!IncompleteEnumType { - // Although the integer tag type will not be stored in the `EnumAuto` struct, - // `InternPool` logic depends on it being present so that `typeOf` can be infallible. - // Ensure it is present here: - _ = try ip.get(gpa, .{ .int_type = .{ - .bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len), - .signedness = .unsigned, - } }); + const int_tag_type = if (enum_type.tag_ty != .none) + enum_type.tag_ty + else + try ip.get(gpa, .{ .int_type = .{ + .bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len), + .signedness = .unsigned, + } }); // We must keep the map in sync with `items`. 
The hash and equality functions // for enum types only look at the decl field, which is present even in @@ -4029,6 +4025,7 @@ pub fn getIncompleteEnumAuto( const extra_index = ip.addExtraAssumeCapacity(EnumAuto{ .decl = enum_type.decl, .namespace = enum_type.namespace, + .int_tag_type = int_tag_type, .names_map = names_map, .fields_len = enum_type.fields_len, }); @@ -4040,7 +4037,7 @@ pub fn getIncompleteEnumAuto( ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), enum_type.fields_len); return .{ .index = @intToEnum(Index, ip.items.len - 1), - .tag_ty_index = undefined, + .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, .names_map = names_map, .names_start = extra_index + extra_fields_len, .values_map = .none, From 61978c8c9473bc06fa1fde75e37374dd330ed614 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 30 May 2023 00:05:55 -0700 Subject: [PATCH 134/205] InternPool: eliminate indexToKey call graph cycle Recursion makes this hot function more difficult to profile and optimize. I measured a 1.05x speedup vs the previous commit with the (set of passing) behavior tests. This commit was the last in a series, and the main thing it needed to do was make InternPool.typeOf not call indexToKey(). This required adding a type field to the runtime_value encoding even though it is technically redundant. This could have been avoided with a loop inside typeOf, but I wanted to keep the machine code of that hot function as simple as possible. The variable encoding is still responsible for a relatively small slice of the InternPool data size. I added a function that provides the payload type corresponding to the InternPool.Tag type, which allows for some handy inline switch prongs. Let's start moving the structs that are specific to InternPool.Tag into the corresponding namespace. This will provide type safety if the encoding of InternPool changes for these types later. --- src/InternPool.zig | 300 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 243 insertions(+), 57 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 3fafca0a7451..7debd2c2a36f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -222,6 +222,11 @@ pub const Key = union(enum) { /// A comptime function call with a memoized result. 
memoized_call: Key.MemoizedCall, + pub const TypeValue = struct { + ty: Index, + val: Index, + }; + pub const IntType = std.builtin.Type.Int; pub const ErrorUnionType = struct { @@ -1372,7 +1377,7 @@ pub const Index = enum(u32) { }, undef: DataIsIndex, - runtime_value: DataIsIndex, + runtime_value: struct { data: *Tag.TypeValue }, simple_value: struct { data: SimpleValue }, ptr_decl: struct { data: *PtrDecl }, ptr_mut_decl: struct { data: *PtrMutDecl }, @@ -1383,7 +1388,7 @@ pub const Index = enum(u32) { ptr_elem: struct { data: *PtrBaseIndex }, ptr_field: struct { data: *PtrBaseIndex }, ptr_slice: struct { data: *PtrSlice }, - opt_payload: struct { data: *TypeValue }, + opt_payload: struct { data: *Tag.TypeValue }, opt_null: DataIsIndex, int_u8: struct { data: u8 }, int_u16: struct { data: u16 }, @@ -1399,7 +1404,7 @@ pub const Index = enum(u32) { int_lazy_size: struct { data: *IntLazy }, error_set_error: struct { data: *Key.Error }, error_union_error: struct { data: *Key.Error }, - error_union_payload: struct { data: *TypeValue }, + error_union_payload: struct { data: *Tag.TypeValue }, enum_literal: struct { data: NullTerminatedString }, enum_tag: struct { data: *Key.EnumTag }, float_f16: struct { data: f16 }, @@ -1410,7 +1415,7 @@ pub const Index = enum(u32) { float_c_longdouble_f80: struct { data: *Float80 }, float_c_longdouble_f128: struct { data: *Float128 }, float_comptime_float: struct { data: *Float128 }, - variable: struct { data: *Variable }, + variable: struct { data: *Tag.Variable }, extern_func: struct { data: *Key.ExternFunc }, func: struct { data: *Key.Func }, only_possible_value: DataIsIndex, @@ -1769,7 +1774,7 @@ pub const Tag = enum(u8) { undef, /// A wrapper for values which are comptime-known but should /// semantically be runtime-known. - /// `data` is `Index` of the value. + /// data is extra index of `TypeValue`. runtime_value, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. @@ -1924,8 +1929,117 @@ pub const Tag = enum(u8) { /// data is extra index to `Key.MemoizedDecl` memoized_decl, /// A memoized comptime function call result. 
- /// data is extra index to `MemoizedFunc` + /// data is extra index to `MemoizedCall` memoized_call, + + const ErrorUnionType = Key.ErrorUnionType; + const OpaqueType = Key.OpaqueType; + const TypeValue = Key.TypeValue; + const Error = Key.Error; + const EnumTag = Key.EnumTag; + const ExternFunc = Key.ExternFunc; + const Func = Key.Func; + const Union = Key.Union; + const MemoizedDecl = Key.MemoizedDecl; + + fn Payload(comptime tag: Tag) type { + return switch (tag) { + .type_int_signed => unreachable, + .type_int_unsigned => unreachable, + .type_array_big => Array, + .type_array_small => Vector, + .type_vector => Vector, + .type_pointer => Pointer, + .type_slice => unreachable, + .type_optional => unreachable, + .type_anyframe => unreachable, + .type_error_union => ErrorUnionType, + .type_error_set => ErrorSet, + .type_inferred_error_set => unreachable, + .type_enum_auto => EnumAuto, + .type_enum_explicit => EnumExplicit, + .type_enum_nonexhaustive => EnumExplicit, + .simple_type => unreachable, + .type_opaque => OpaqueType, + .type_struct => unreachable, + .type_struct_ns => unreachable, + .type_struct_anon => TypeStructAnon, + .type_tuple_anon => TypeStructAnon, + .type_union_tagged => unreachable, + .type_union_untagged => unreachable, + .type_union_safety => unreachable, + .type_function => TypeFunction, + + .undef => unreachable, + .runtime_value => TypeValue, + .simple_value => unreachable, + .ptr_decl => PtrDecl, + .ptr_mut_decl => PtrMutDecl, + .ptr_comptime_field => PtrComptimeField, + .ptr_int => PtrBase, + .ptr_eu_payload => PtrBase, + .ptr_opt_payload => PtrBase, + .ptr_elem => PtrBaseIndex, + .ptr_field => PtrBaseIndex, + .ptr_slice => PtrSlice, + .opt_payload => TypeValue, + .opt_null => unreachable, + .int_u8 => unreachable, + .int_u16 => unreachable, + .int_u32 => unreachable, + .int_i32 => unreachable, + .int_usize => unreachable, + .int_comptime_int_u32 => unreachable, + .int_comptime_int_i32 => unreachable, + .int_small => IntSmall, + .int_positive => unreachable, + .int_negative => unreachable, + .int_lazy_align => IntLazy, + .int_lazy_size => IntLazy, + .error_set_error => Error, + .error_union_error => Error, + .error_union_payload => TypeValue, + .enum_literal => unreachable, + .enum_tag => EnumTag, + .float_f16 => unreachable, + .float_f32 => unreachable, + .float_f64 => unreachable, + .float_f80 => unreachable, + .float_f128 => unreachable, + .float_c_longdouble_f80 => unreachable, + .float_c_longdouble_f128 => unreachable, + .float_comptime_float => unreachable, + .variable => Variable, + .extern_func => ExternFunc, + .func => Func, + .only_possible_value => unreachable, + .union_value => Union, + .bytes => Bytes, + .aggregate => Aggregate, + .repeated => Repeated, + .memoized_decl => MemoizedDecl, + .memoized_call => MemoizedCall, + }; + } + + pub const Variable = struct { + ty: Index, + /// May be `none`. + init: Index, + decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" var stderrp = ...` would have 'c' as library name. + lib_name: OptionalNullTerminatedString, + flags: Flags, + + pub const Flags = packed struct(u32) { + is_extern: bool, + is_const: bool, + is_threadlocal: bool, + is_weak_linkage: bool, + _: u28 = 0, + }; + }; }; /// Trailing: @@ -2137,11 +2251,6 @@ pub const Array = struct { } }; -pub const TypeValue = struct { - ty: Index, - val: Index, -}; - /// Trailing: /// 0. field name: NullTerminatedString for each fields_len; declaration order /// 1. 
tag value: Index for each fields_len; declaration order @@ -2190,25 +2299,6 @@ pub const PackedU64 = packed struct(u64) { } }; -pub const Variable = struct { - /// This is a value if has_init is true, otherwise a type. - init: Index, - decl: Module.Decl.Index, - /// Library name if specified. - /// For example `extern "c" var stderrp = ...` would have 'c' as library name. - lib_name: OptionalNullTerminatedString, - flags: Flags, - - pub const Flags = packed struct(u32) { - has_init: bool, - is_extern: bool, - is_const: bool, - is_threadlocal: bool, - is_weak_linkage: bool, - _: u27 = 0, - }; -}; - pub const PtrDecl = struct { ty: Index, decl: Module.Decl.Index, @@ -2569,19 +2659,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .type_function => .{ .func_type = ip.indexToKeyFuncType(data) }, .undef => .{ .undef = @intToEnum(Index, data) }, - .runtime_value => { - const val = @intToEnum(Index, data); - return .{ .runtime_value = .{ - .ty = ip.typeOf(val), - .val = val, - } }; - }, + .runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) }, .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, } }, .opt_payload => { - const extra = ip.extraData(TypeValue, data); + const extra = ip.extraData(Tag.TypeValue, data); return .{ .opt = .{ .ty = extra.ty, .val = extra.val, @@ -2806,10 +2890,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .storage = .{ .f128 = ip.extraData(Float128, data).get() }, } }, .variable => { - const extra = ip.extraData(Variable, data); + const extra = ip.extraData(Tag.Variable, data); return .{ .variable = .{ - .ty = if (extra.flags.has_init) ip.typeOf(extra.init) else extra.init, - .init = if (extra.flags.has_init) extra.init else .none, + .ty = extra.ty, + .init = extra.init, .decl = extra.decl, .lib_name = extra.lib_name, .is_extern = extra.flags.is_extern, @@ -2887,7 +2971,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .error_union_payload => { - const extra = ip.extraData(TypeValue, data); + const extra = ip.extraData(Tag.TypeValue, data); return .{ .error_union = .{ .ty = extra.ty, .val = .{ .payload = extra.val }, @@ -3124,7 +3208,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(runtime_value.ty == ip.typeOf(runtime_value.val)); ip.items.appendAssumeCapacity(.{ .tag = .runtime_value, - .data = @enumToInt(runtime_value.val), + .data = try ip.addExtra(gpa, runtime_value), }); }, @@ -3266,12 +3350,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (has_init) assert(variable.ty == ip.typeOf(variable.init)); ip.items.appendAssumeCapacity(.{ .tag = .variable, - .data = try ip.addExtra(gpa, Variable{ - .init = if (has_init) variable.init else variable.ty, + .data = try ip.addExtra(gpa, Tag.Variable{ + .ty = variable.ty, + .init = variable.init, .decl = variable.decl, .lib_name = variable.lib_name, .flags = .{ - .has_init = has_init, .is_extern = variable.is_extern, .is_const = variable.is_const, .is_threadlocal = variable.is_threadlocal, @@ -3431,7 +3515,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(opt.ty), } else .{ .tag = .opt_payload, - .data = try ip.addExtra(gpa, TypeValue{ + .data = try ip.addExtra(gpa, Tag.TypeValue{ .ty = opt.ty, .val = opt.val, }), @@ -3642,7 +3726,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .payload => |payload| .{ .tag = .error_union_payload, - .data = try ip.addExtra(gpa, 
TypeValue{ + .data = try ip.addExtra(gpa, Tag.TypeValue{ .ty = error_union.ty, .val = payload, }), @@ -4222,7 +4306,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), - Variable.Flags => @bitCast(u32, @field(extra, field.name)), + Tag.Variable.Flags => @bitCast(u32, @field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), }); } @@ -4290,7 +4374,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), - Variable.Flags => @bitCast(Variable.Flags, int32), + Tag.Variable.Flags => @bitCast(Tag.Variable.Flags, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), }; } @@ -4737,7 +4821,7 @@ pub fn isAggregateType(ip: InternPool, ty: Index) bool { /// The is only legal because the initializer is not part of the hash. pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { assert(ip.items.items(.tag)[@enumToInt(index)] == .variable); - const field_index = inline for (@typeInfo(Variable).Struct.fields, 0..) |field, field_index| { + const field_index = inline for (@typeInfo(Tag.Variable).Struct.fields, 0..) |field, field_index| { if (comptime std.mem.eql(u8, field.name, "init")) break field_index; } else unreachable; ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index); @@ -4847,7 +4931,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { }, .undef => 0, - .runtime_value => 0, + .runtime_value => @sizeOf(Tag.TypeValue), .simple_type => 0, .simple_value => 0, .ptr_decl => @sizeOf(PtrDecl), @@ -4860,7 +4944,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .ptr_field => @sizeOf(PtrBaseIndex), .ptr_slice => @sizeOf(PtrSlice), .opt_null => 0, - .opt_payload => @sizeOf(TypeValue), + .opt_payload => @sizeOf(Tag.TypeValue), .int_u8 => 0, .int_u16 => 0, .int_u32 => 0, @@ -4880,7 +4964,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), .error_set_error, .error_union_error => @sizeOf(Key.Error), - .error_union_payload => @sizeOf(TypeValue), + .error_union_payload => @sizeOf(Tag.TypeValue), .enum_literal => 0, .enum_tag => @sizeOf(Key.EnumTag), @@ -4905,7 +4989,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .float_c_longdouble_f80 => @sizeOf(Float80), .float_c_longdouble_f128 => @sizeOf(Float128), .float_comptime_float => @sizeOf(Float128), - .variable => @sizeOf(Variable) + @sizeOf(Module.Decl), + .variable => @sizeOf(Tag.Variable) + @sizeOf(Module.Decl), .extern_func => @sizeOf(Key.ExternFunc) + @sizeOf(Module.Decl), .func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), .only_possible_value => 0, @@ -5179,6 +5263,7 @@ pub fn typeOf(ip: InternPool, index: Index) Index { .generic_poison_type, .empty_struct_type, => .type_type, + .undef => .undefined_type, .zero, .one, .negative_one => .comptime_int_type, .zero_usize, .one_usize => .usize_type, @@ -5190,8 +5275,109 @@ pub fn typeOf(ip: InternPool, index: Index) Index { .bool_true, .bool_false => .bool_type, .empty_struct => .empty_struct_type, .generic_poison => 
.generic_poison_type, - .var_args_param_type, .none => unreachable, - _ => ip.indexToKey(index).typeOf(), + + // This optimization on tags is needed so that indexToKey can call + // typeOf without being recursive. + _ => switch (ip.items.items(.tag)[@enumToInt(index)]) { + .type_int_signed, + .type_int_unsigned, + .type_array_big, + .type_array_small, + .type_vector, + .type_pointer, + .type_slice, + .type_optional, + .type_anyframe, + .type_error_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + .simple_type, + .type_opaque, + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + .type_function, + => .type_type, + + .undef, + .opt_null, + .only_possible_value, + => @intToEnum(Index, ip.items.items(.data)[@enumToInt(index)]), + + .simple_value => unreachable, // handled via Index above + + inline .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .error_union_payload, + .runtime_value, + .int_small, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .enum_tag, + .variable, + .extern_func, + .func, + .union_value, + .bytes, + .aggregate, + .repeated, + => |t| { + const extra_index = ip.items.items(.data)[@enumToInt(index)]; + const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; + return @intToEnum(Index, ip.extra.items[extra_index + field_index]); + }, + + .int_u8 => .u8_type, + .int_u16 => .u16_type, + .int_u32 => .u32_type, + .int_i32 => .i32_type, + .int_usize => .usize_type, + + .int_comptime_int_u32, + .int_comptime_int_i32, + => .comptime_int_type, + + // Note these are stored in limbs data, not extra data. + .int_positive, + .int_negative, + => ip.limbData(Int, ip.items.items(.data)[@enumToInt(index)]).ty, + + .enum_literal => .enum_literal_type, + .float_f16 => .f16_type, + .float_f32 => .f32_type, + .float_f64 => .f64_type, + .float_f80 => .f80_type, + .float_f128 => .f128_type, + + .float_c_longdouble_f80, + .float_c_longdouble_f128, + => .c_longdouble_type, + + .float_comptime_float => .comptime_float_type, + + .memoized_decl => unreachable, + .memoized_call => unreachable, + }, + + .var_args_param_type => unreachable, + .none => unreachable, }; } From d0cd1c89da5d688634cdcd3fd645799b14553660 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 30 May 2023 02:18:07 -0400 Subject: [PATCH 135/205] Sema: port lazy value usage to be InternPool aware --- src/Sema.zig | 260 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 187 insertions(+), 73 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index d25b02225450..7b2d8a0faadc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1915,6 +1915,18 @@ fn resolveConstValue( return sema.failWithNeededComptime(block, src, reason); } +/// Will not return Value Tags: `variable`, `undef`. Instead they will emit compile errors. +/// Lazy values are recursively resolved. +fn resolveConstLazyValue( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, + reason: []const u8, +) CompileError!Value { + return sema.resolveLazyValue(try sema.resolveConstValue(block, src, air_ref, reason)); +} + /// Value Tag `variable` causes this function to return `null`. /// Value Tag `undef` causes this function to return a compile error. 
fn resolveDefinedValue( @@ -1952,10 +1964,22 @@ fn resolveMaybeUndefVal( } } +/// Value Tag `variable` causes this function to return `null`. +/// Value Tag `undef` causes this function to return the Value. +/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. +/// Lazy values are recursively resolved. +fn resolveMaybeUndefLazyVal( + sema: *Sema, + inst: Air.Inst.Ref, +) CompileError!?Value { + return try sema.resolveLazyValue((try sema.resolveMaybeUndefVal(inst)) orelse return null); +} + /// Value Tag `variable` results in `null`. /// Value Tag `undef` results in the Value. /// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. /// Value Tag `decl_ref` and `decl_ref_mut` or any nested such value results in `null`. +/// Lazy values are recursively resolved. fn resolveMaybeUndefValIntable( sema: *Sema, inst: Air.Inst.Ref, @@ -1976,8 +2000,7 @@ fn resolveMaybeUndefValIntable( else => break, }, }; - try sema.resolveLazyValue(val); - return val; + return try sema.resolveLazyValue(val); } /// Returns all Value tags including `variable` and `undef`. @@ -5257,8 +5280,7 @@ fn zirCompileLog( const arg = try sema.resolveInst(arg_ref); const arg_ty = sema.typeOf(arg); - if (try sema.resolveMaybeUndefVal(arg)) |val| { - try sema.resolveLazyValue(val); + if (try sema.resolveMaybeUndefLazyVal(arg)) |val| { try writer.print("@as({}, {})", .{ arg_ty.fmt(sema.mod), val.fmtValue(arg_ty, sema.mod), }); @@ -7266,15 +7288,14 @@ fn analyzeInlineCallArg( // parameter or return type. return error.GenericPoison; }, - else => { - // Needed so that lazy values do not trigger - // assertion due to type not being resolved - // when the hash function is called. - try sema.resolveLazyValue(arg_val); - }, + else => {}, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod); - memoized_arg_values[arg_i.*] = try arg_val.intern(param_ty.toType(), mod); + // Needed so that lazy values do not trigger + // assertion due to type not being resolved + // when the hash function is called. + const resolved_arg_val = try sema.resolveLazyValue(arg_val); + should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); + memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(param_ty.toType(), mod); } else { sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg); } @@ -7302,15 +7323,14 @@ fn analyzeInlineCallArg( // parameter or return type. return error.GenericPoison; }, - else => { - // Needed so that lazy values do not trigger - // assertion due to type not being resolved - // when the hash function is called. - try sema.resolveLazyValue(arg_val); - }, + else => {}, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod); - memoized_arg_values[arg_i.*] = try arg_val.intern(sema.typeOf(uncasted_arg), mod); + // Needed so that lazy values do not trigger + // assertion due to type not being resolved + // when the hash function is called. 
+ const resolved_arg_val = try sema.resolveLazyValue(arg_val); + should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); + memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(sema.typeOf(uncasted_arg), mod); } else { if (zir_tags[inst] == .param_anytype_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); @@ -7369,9 +7389,7 @@ fn analyzeGenericCallArg( } fn analyzeGenericCallArgVal(sema: *Sema, block: *Block, arg_src: LazySrcLoc, uncasted_arg: Air.Inst.Ref) !Value { - const arg_val = try sema.resolveValue(block, arg_src, uncasted_arg, "parameter is comptime"); - try sema.resolveLazyValue(arg_val); - return arg_val; + return sema.resolveLazyValue(try sema.resolveValue(block, arg_src, uncasted_arg, "parameter is comptime")); } fn instantiateGenericCall( @@ -7903,7 +7921,8 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) for (tuple.types, tuple.values) |field_ty, field_val| { try sema.resolveTupleLazyValues(block, src, field_ty.toType()); if (field_val == .none) continue; - try sema.resolveLazyValue(field_val.toValue()); + // TODO: mutate in intern pool + _ = try sema.resolveLazyValue(field_val.toValue()); } } @@ -10104,8 +10123,9 @@ fn zirSwitchCapture( if (block.inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; + const resolved_item_val = try sema.resolveLazyValue(item_val); if (operand_ty.zigTypeTag(mod) == .Union) { - const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?); + const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(resolved_item_val, sema.mod).?); const union_obj = mod.typeToUnion(operand_ty).?; const field_ty = union_obj.fields.values()[field_index].ty; if (try sema.resolveDefinedValue(block, sema.src, operand_ptr)) |union_val| { @@ -10141,7 +10161,7 @@ fn zirSwitchCapture( return block.addStructFieldVal(operand_ptr, field_index, field_ty); } } else if (is_ref) { - return sema.addConstantMaybeRef(block, operand_ty, item_val, true); + return sema.addConstantMaybeRef(block, operand_ty, resolved_item_val, true); } else { return block.inline_case_capture; } @@ -10249,7 +10269,7 @@ fn zirSwitchCapture( for (items) |item| { const item_ref = try sema.resolveInst(item); // Previous switch validation ensured this will succeed - const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; + const item_val = sema.resolveConstLazyValue(block, .unneeded, item_ref, "") catch unreachable; const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError(mod).?); names.putAssumeCapacityNoClobber(name_ip, {}); } @@ -10259,7 +10279,7 @@ fn zirSwitchCapture( } else { const item_ref = try sema.resolveInst(items[0]); // Previous switch validation ensured this will succeed - const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; + const item_val = sema.resolveConstLazyValue(block, .unneeded, item_ref, "") catch unreachable; const item_ty = try mod.singleErrorSetType(item_val.getError(mod).?); return sema.bitCast(block, item_ty, operand, operand_src, null); @@ -10993,6 +11013,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer merges.deinit(gpa); if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| { + const resolved_operand_val = try sema.resolveLazyValue(operand_val); var extra_index: usize = 
special.end; { var scalar_i: usize = 0; @@ -11007,8 +11028,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. - const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, mod)) { + const item_val = sema.resolveConstLazyValue(&child_block, .unneeded, item, "") catch unreachable; + if (resolved_operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11033,8 +11054,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError for (items) |item_ref| { const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. - const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, mod)) { + const item_val = sema.resolveConstLazyValue(&child_block, .unneeded, item, "") catch unreachable; + if (resolved_operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11052,8 +11073,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // Validation above ensured these will succeed. const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first, "") catch unreachable; const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last, "") catch unreachable; - if ((try sema.compareAll(operand_val, .gte, first_tv.val, operand_ty)) and - (try sema.compareAll(operand_val, .lte, last_tv.val, operand_ty))) + if ((try sema.compareAll(resolved_operand_val, .gte, first_tv.val, operand_ty)) and + (try sema.compareAll(resolved_operand_val, .lte, last_tv.val, operand_ty))) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11135,7 +11156,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // `item` is already guaranteed to be constant known. const analyze_body = if (union_originally) blk: { - const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; + const item_val = sema.resolveConstLazyValue(block, .unneeded, item, "") catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, mod); break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; @@ -11683,8 +11704,7 @@ fn resolveSwitchItemVal( // Constructing a LazySrcLoc is costly because we only have the switch AST node. // Only if we know for sure we need to report a compile error do we resolve the // full source locations. 
- if (sema.resolveConstValue(block, .unneeded, item, "")) |val| { - try sema.resolveLazyValue(val); + if (sema.resolveConstLazyValue(block, .unneeded, item, "")) |val| { return val.toIntern(); } else |err| switch (err) { error.NeededSourceLocation => { @@ -20634,9 +20654,8 @@ fn zirBitCount( } }, .Int => { - if (try sema.resolveMaybeUndefVal(operand)) |val| { + if (try sema.resolveMaybeUndefLazyVal(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_scalar_ty); - try sema.resolveLazyValue(val); return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod)); } else { try sema.requireRuntimeBlock(block, src, operand_src); @@ -22311,18 +22330,18 @@ fn analyzeMinMax( continue; } - try sema.resolveLazyValue(cur_val); - try sema.resolveLazyValue(operand_val); + const resolved_cur_val = try sema.resolveLazyValue(cur_val); + const resolved_operand_val = try sema.resolveLazyValue(operand_val); const vec_len = simd_op.len orelse { - const result_val = opFunc(cur_val, operand_val, mod); + const result_val = opFunc(resolved_cur_val, resolved_operand_val, mod); cur_minmax = try sema.addConstant(simd_op.result_ty, result_val); continue; }; const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const lhs_elem_val = try cur_val.elemValue(mod, i); - const rhs_elem_val = try operand_val.elemValue(mod, i); + const lhs_elem_val = try resolved_cur_val.elemValue(mod, i); + const rhs_elem_val = try resolved_operand_val.elemValue(mod, i); elem.* = try opFunc(lhs_elem_val, rhs_elem_val, mod).intern(simd_op.scalar_ty, mod); } cur_minmax = try sema.addConstant(simd_op.result_ty, (try mod.intern(.{ .aggregate = .{ @@ -30360,13 +30379,11 @@ fn cmpNumeric( if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { // Compare ints: const vs. undefined (or vice versa) if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) { - try sema.resolveLazyValue(lhs_val); - if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) { - try sema.resolveLazyValue(rhs_val); - if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -30389,19 +30406,17 @@ fn cmpNumeric( } else { if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. var - try sema.resolveLazyValue(lhs_val); - if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } break :src rhs_src; } } else { - if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { + if (try sema.resolveMaybeUndefLazyVal(rhs)) |rhs_val| { if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. 
const - try sema.resolveLazyValue(rhs_val); - if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -30465,8 +30480,7 @@ fn cmpNumeric( var dest_float_type: ?Type = null; var lhs_bits: usize = undefined; - if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { - try sema.resolveLazyValue(lhs_val); + if (try sema.resolveMaybeUndefLazyVal(lhs)) |lhs_val| { if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (lhs_val.isNan(mod)) switch (op) { @@ -30524,8 +30538,7 @@ fn cmpNumeric( } var rhs_bits: usize = undefined; - if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - try sema.resolveLazyValue(rhs_val); + if (try sema.resolveMaybeUndefLazyVal(rhs)) |rhs_val| { if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (rhs_val.isNan(mod)) switch (op) { @@ -31467,32 +31480,133 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileErro /// Make it so that calling hash() and eql() on `val` will not assert due /// to a type not having its layout resolved. -fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { - switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { +fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { + const mod = sema.mod; + switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { - .u64, .i64, .big_int => {}, - .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), + .u64, .i64, .big_int => return val, + .lazy_align, .lazy_size => return (try mod.intern(.{ .int = .{ + .ty = int.ty, + .storage = .{ .u64 = (try val.getUnsignedIntAdvanced(mod, sema)).? 
}, + } })).toValue(), }, .ptr => |ptr| { + const resolved_len = switch (ptr.len) { + .none => .none, + else => (try sema.resolveLazyValue(ptr.len.toValue())).toIntern(), + }; switch (ptr.addr) { - .decl, .mut_decl => {}, - .int => |int| try sema.resolveLazyValue(int.toValue()), - .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), - .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), - .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), + .decl, .mut_decl => return if (resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .decl => |decl| .{ .decl = decl }, + .mut_decl => |mut_decl| .{ .mut_decl = mut_decl }, + else => unreachable, + }, + .len = resolved_len, + } })).toValue(), + .comptime_field => |field_val| { + const resolved_field_val = + (try sema.resolveLazyValue(field_val.toValue())).toIntern(); + return if (resolved_field_val == field_val and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = .{ .comptime_field = resolved_field_val }, + .len = resolved_len, + } })).toValue(); + }, + .int => |int| { + const resolved_int = (try sema.resolveLazyValue(int.toValue())).toIntern(); + return if (resolved_int == int and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = .{ .int = resolved_int }, + .len = resolved_len, + } })).toValue(); + }, + .eu_payload, .opt_payload => |base| { + const resolved_base = (try sema.resolveLazyValue(base.toValue())).toIntern(); + return if (resolved_base == base and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .eu_payload => .{ .eu_payload = resolved_base }, + .opt_payload => .{ .opt_payload = resolved_base }, + else => unreachable, + }, + .len = ptr.len, + } })).toValue(); + }, + .elem, .field => |base_index| { + const resolved_base = (try sema.resolveLazyValue(base_index.base.toValue())).toIntern(); + return if (resolved_base == base_index.base and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .elem => .{ .elem = .{ + .base = resolved_base, + .index = base_index.index, + } }, + .field => .{ .field = .{ + .base = resolved_base, + .index = base_index.index, + } }, + else => unreachable, + }, + .len = ptr.len, + } })).toValue(); + }, } - if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); }, .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => {}, - .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), - .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), + .bytes => return val, + .elems => |elems| { + var resolved_elems: []InternPool.Index = &.{}; + for (elems, 0..) 
|elem, i| { + const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern(); + if (resolved_elems.len == 0 and resolved_elem != elem) { + resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len); + @memcpy(resolved_elems[0..i], elems[0..i]); + } + if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; + } + return if (resolved_elems.len == 0) val else (try mod.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .elems = resolved_elems }, + } })).toValue(); + }, + .repeated_elem => |elem| { + const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern(); + return if (resolved_elem == elem) val else (try mod.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .repeated_elem = resolved_elem }, + } })).toValue(); + }, }, .un => |un| { - try sema.resolveLazyValue(un.tag.toValue()); - try sema.resolveLazyValue(un.val.toValue()); + const resolved_tag = (try sema.resolveLazyValue(un.tag.toValue())).toIntern(); + const resolved_val = (try sema.resolveLazyValue(un.val.toValue())).toIntern(); + return if (resolved_tag == un.tag and resolved_val == un.val) + val + else + (try mod.intern(.{ .un = .{ + .ty = un.ty, + .tag = resolved_tag, + .val = resolved_val, + } })).toValue(); }, - else => {}, + else => return val, } } From f673c98a7cd18b552cb4959bc22d1794df34c7ab Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 30 May 2023 03:54:34 -0400 Subject: [PATCH 136/205] Sema: fix sus overflow behavior in RangeSetUnhandledIterator The old code assumed that `intAddScalar` could return a value outside of the range of `ty`, which is problematic for many reasons. The new code (ab)uses the InternPool for speed. --- src/Sema.zig | 87 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 55 insertions(+), 32 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 7b2d8a0faadc..8836e8952821 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -11509,7 +11509,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError while (try it.next()) |cur| { cases_len += 1; - const item_ref = try sema.addConstant(operand_ty, cur); + const item_ref = try sema.addConstant(operand_ty, cur.toValue()); case_block.inline_case_capture = item_ref; case_block.instructions.shrinkRetainingCapacity(0); @@ -11647,47 +11647,70 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } const RangeSetUnhandledIterator = struct { - sema: *Sema, - ty: Type, - cur: Value, - max: Value, + mod: *Module, + cur: ?InternPool.Index, + max: InternPool.Index, + range_i: usize, ranges: []const RangeSet.Range, - range_i: usize = 0, - first: bool = true, + limbs: []math.big.Limb, + + const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128); fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; - const min = try ty.minInt(mod, ty); - const max = try ty.maxInt(mod, ty); - - return RangeSetUnhandledIterator{ - .sema = sema, - .ty = ty, - .cur = min, - .max = max, + const int_type = mod.intern_pool.indexToKey(ty.toIntern()).int_type; + const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits); + return .{ + .mod = mod, + .cur = (try ty.minInt(mod, ty)).toIntern(), + .max = (try ty.maxInt(mod, ty)).toIntern(), + .range_i = 0, .ranges = range_set.ranges.items, + .limbs = if (needed_limbs > preallocated_limbs) + try sema.arena.alloc(math.big.Limb, needed_limbs) + else + &.{}, }; } - fn next(it: *RangeSetUnhandledIterator) !?Value { - while (it.range_i < 
it.ranges.len) : (it.range_i += 1) { - if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); - } - it.first = false; - if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first.toValue(), it.ty, it.sema.mod)) { - return it.cur; - } - it.cur = it.ranges[it.range_i].last.toValue(); - } - if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); + fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index { + if (val == it.max) return null; + const int = it.mod.intern_pool.indexToKey(val).int; + + switch (int.storage) { + inline .u64, .i64 => |val_int| { + const next_int = @addWithOverflow(val_int, 1); + if (next_int[1] == 0) + return (try it.mod.intValue(int.ty.toType(), next_int[0])).toIntern(); + }, + .big_int => {}, + .lazy_align, .lazy_size => unreachable, } - it.first = false; - if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) { - return it.cur; + + var val_space: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const val_bigint = int.storage.toBigInt(&val_space); + + var result_limbs: [preallocated_limbs]math.big.Limb = undefined; + var result_bigint = math.big.int.Mutable.init( + if (it.limbs.len > 0) it.limbs else &result_limbs, + 0, + ); + + result_bigint.addScalar(val_bigint, 1); + return (try it.mod.intValue_big(int.ty.toType(), result_bigint.toConst())).toIntern(); + } + + fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index { + var cur = it.cur orelse return null; + while (it.range_i < it.ranges.len and cur == it.ranges[it.range_i].first) { + defer it.range_i += 1; + cur = (try it.addOne(it.ranges[it.range_i].last)) orelse { + it.cur = null; + return null; + }; } - return null; + it.cur = try it.addOne(cur); + return cur; } }; From 494d4f9b8e54b3a0f6c452e1b7dd28e50dd1e6c0 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 30 May 2023 04:29:23 -0400 Subject: [PATCH 137/205] behavior: update for different inferred error set order Error sets are no longer alphabetically sorted. 
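
For illustration, here is a minimal sketch (a hypothetical test, not taken
from this patch) of the observable change. The old expectations matched
alphabetical order; the new expectations match the order in which the
errors are declared:

    const std = @import("std");

    test "merged error set is no longer sorted by name" {
        // `A` and `B` are illustrative names, not from the test suite.
        const A = error{ One, Two };
        const B = error{Three};
        const names = @typeInfo(A || B).ErrorSet.?;
        // Old order: "One", "Three", "Two" (alphabetical).
        // New order: "One", "Two", "Three" (declaration order).
        try std.testing.expect(std.mem.eql(u8, names[0].name, "One"));
        try std.testing.expect(std.mem.eql(u8, names[1].name, "Two"));
        try std.testing.expect(std.mem.eql(u8, names[2].name, "Three"));
    }

The test updates below follow the same pattern: "Two" now precedes "Three"
because it is declared first, not because of its spelling.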
--- test/behavior/cast.zig | 16 ++++++++-------- test/behavior/type_info.zig | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 594bf683e5bd..e9ebd4476be2 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -746,8 +746,8 @@ test "peer type resolution: disjoint error sets" { try expect(error_set_info == .ErrorSet); try expect(error_set_info.ErrorSet.?.len == 3); try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three")); } { @@ -756,8 +756,8 @@ test "peer type resolution: disjoint error sets" { try expect(error_set_info == .ErrorSet); try expect(error_set_info.ErrorSet.?.len == 3); try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three")); } } @@ -778,8 +778,8 @@ test "peer type resolution: error union and error set" { const error_set_info = @typeInfo(info.ErrorUnion.error_set); try expect(error_set_info.ErrorSet.?.len == 3); try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three")); } { @@ -790,8 +790,8 @@ test "peer type resolution: error union and error set" { const error_set_info = @typeInfo(info.ErrorUnion.error_set); try expect(error_set_info.ErrorSet.?.len == 3); try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three")); } } diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig index 2fdb112a729a..e8199913bcdb 100644 --- a/test/behavior/type_info.zig +++ b/test/behavior/type_info.zig @@ -214,8 +214,8 @@ test "type info: error set merged" { try expect(error_set_info == .ErrorSet); try expect(error_set_info.ErrorSet.?.len == 3); try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three")); - try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two")); + try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three")); } test "type info: enum info" { From 6b81546454f925807d2298a127458741be7239e9 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 30 May 2023 04:57:30 -0400 Subject: [PATCH 138/205] Type: fix `@typeName` for `undefined` --- src/type.zig | 46 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/src/type.zig b/src/type.zig index 0ce242b6163e..f285caff95b1 100644 --- a/src/type.zig +++ b/src/type.zig @@ -288,7 +288,51 @@ pub const 
Type = struct { } try writer.writeAll("}"); }, - .simple_type => |s| return writer.writeAll(@tagName(s)), + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, + => return writer.writeAll(@tagName(s)), + .null, + .undefined, + => try writer.print("@TypeOf({s})", .{@tagName(s)}), + .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + .generic_poison, + => unreachable, + }, .struct_type => |struct_type| { if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { const decl = mod.declPtr(struct_obj.owner_decl); From 90a877f462fce8bee69ad366aac66805a7c00571 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 30 May 2023 13:54:22 -0700 Subject: [PATCH 139/205] InternPool: pass by const pointer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Zig language allows the compiler to make this optimization automatically. We should definitely make the compiler do that, and revert this commit. However, that will not happen in this branch, and I want to continue to explore achieving performance parity with merge-base. So, this commit changes all InternPool parameters to be passed by const pointer rather than by value. I measured a 1.03x ± 0.03 speedup vs the previous commit compiling the (set of passing) behavior tests. Against merge-base, this commit is 1.17x ± 0.04 slower, which is an improvement from the previous measurement of 1.22x ± 0.02. 
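
As a self-contained sketch of the pattern (illustrative type and function
names, not code from this changeset): passing a large struct by value leaves
the copy-vs-reference decision to the compiler, while `*const` guarantees
that no copy happens at the call site:

    const std = @import("std");

    const Pool = struct { limbs: [4096]u64 };

    // By value: semantically a copy. Because Zig parameters are immutable,
    // the compiler is allowed to pass a reference instead, but it is not
    // required to.
    fn countByValue(pool: Pool) usize {
        var n: usize = 0;
        for (pool.limbs) |limb| n += @popCount(limb);
        return n;
    }

    // By const pointer: no copy can occur; call sites must write `&pool`.
    fn countByPtr(pool: *const Pool) usize {
        var n: usize = 0;
        for (pool.limbs) |limb| n += @popCount(limb);
        return n;
    }

    test "both parameter styles agree" {
        const pool = Pool{ .limbs = [_]u64{0b1011} ** 4096 };
        try std.testing.expectEqual(countByValue(pool), countByPtr(&pool));
    }

In this changeset the same mechanical change is applied to every InternPool
parameter, so call sites such as `air.typeOf(inst, mod.intern_pool)` become
`air.typeOf(inst, &mod.intern_pool)`.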
Related issue: #13510 Related issue: #14129 Related issue: #15688 --- src/Air.zig | 8 ++-- src/InternPool.zig | 84 ++++++++++++++++++------------------ src/Liveness.zig | 8 ++-- src/Liveness/Verify.zig | 4 +- src/Module.zig | 2 +- src/Sema.zig | 2 +- src/arch/aarch64/CodeGen.zig | 6 +-- src/arch/arm/CodeGen.zig | 6 +-- src/arch/riscv64/CodeGen.zig | 6 +-- src/arch/sparc64/CodeGen.zig | 6 +-- src/arch/wasm/CodeGen.zig | 6 +-- src/arch/x86_64/CodeGen.zig | 6 +-- src/codegen/c.zig | 6 +-- src/codegen/llvm.zig | 10 ++--- src/codegen/spirv.zig | 6 +-- src/print_air.zig | 2 +- src/type.zig | 20 ++++----- 17 files changed, 94 insertions(+), 94 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 56f7d4cf01b3..b179a3c024ec 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1182,7 +1182,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { +pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type { const ref_int = @enumToInt(inst); if (ref_int < InternPool.static_keys.len) { return InternPool.static_keys[ref_int].typeOf().toType(); @@ -1190,7 +1190,7 @@ pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { return air.typeOfIndex(ref_int - ref_start_index, ip); } -pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type { const datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst]) { .add, @@ -1520,7 +1520,7 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .interned => return air_datas[inst_index].interned.toValue(), - else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), + else => return air.typeOfIndex(inst_index, &mod.intern_pool).onePossibleValue(mod), } } @@ -1537,7 +1537,7 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { /// because it can cause side effects. If an instruction does not need to be /// lowered, and Liveness determines its result is unused, backends should /// avoid lowering it. 
-pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { +pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { const data = air.instructions.items(.data)[inst]; return switch (air.instructions.items(.tag)[inst]) { .arg, diff --git a/src/InternPool.zig b/src/InternPool.zig index 7debd2c2a36f..ffd72245d5c9 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2992,7 +2992,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; } -fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { +fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { const type_function = ip.extraDataTrail(TypeFunction, data); const param_types = @ptrCast( []Index, @@ -3015,7 +3015,7 @@ fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { }; } -fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { +fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { const enum_explicit = ip.extraDataTrail(EnumExplicit, data); const names = @ptrCast( []const NullTerminatedString, @@ -3038,7 +3038,7 @@ fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key } }; } -fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key { +fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key { const int_info = ip.limbData(Int, limb_index); return .{ .int = .{ .ty = int_info.ty, @@ -4351,7 +4351,7 @@ fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { } } -fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { +fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, 0..) |field, i| { @@ -4384,12 +4384,12 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: }; } -fn extraData(ip: InternPool, comptime T: type, index: usize) T { +fn extraData(ip: *const InternPool, comptime T: type, index: usize) T { return extraDataTrail(ip, T, index).data; } /// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. -fn limbData(ip: InternPool, comptime T: type, index: usize) T { +fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { switch (@sizeOf(Limb)) { @sizeOf(u32) => return extraData(ip, T, index), @sizeOf(u64) => {}, @@ -4413,7 +4413,7 @@ fn limbData(ip: InternPool, comptime T: type, index: usize) T { } /// This function returns the Limb slice that is trailing data after a payload. -fn limbSlice(ip: InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { +fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { const field_count = @typeInfo(S).Struct.fields.len; switch (@sizeOf(Limb)) { @sizeOf(u32) => { @@ -4433,7 +4433,7 @@ const LimbsAsIndexes = struct { len: u32, }; -fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes { +fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes { const host_slice = switch (@sizeOf(Limb)) { @sizeOf(u32) => ip.extra.items, @sizeOf(u64) => ip.limbs.items, @@ -4447,7 +4447,7 @@ fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes { } /// This function converts Limb array indexes to a primitive slice type. 
-fn limbsIndexToSlice(ip: InternPool, limbs: LimbsAsIndexes) []const Limb { +fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb { return switch (@sizeOf(Limb)) { @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], @@ -4485,7 +4485,7 @@ test "basic usage" { try std.testing.expect(another_array_i32 == array_i32); } -pub fn childType(ip: InternPool, i: Index) Index { +pub fn childType(ip: *const InternPool, i: Index) Index { return switch (ip.indexToKey(i)) { .ptr_type => |ptr_type| ptr_type.elem_type, .vector_type => |vector_type| vector_type.child, @@ -4496,7 +4496,7 @@ pub fn childType(ip: InternPool, i: Index) Index { } /// Given a slice type, returns the type of the ptr field. -pub fn slicePtrType(ip: InternPool, i: Index) Index { +pub fn slicePtrType(ip: *const InternPool, i: Index) Index { switch (i) { .slice_const_u8_type => return .manyptr_const_u8_type, .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, @@ -4510,7 +4510,7 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index { } /// Given a slice value, returns the value of the ptr field. -pub fn slicePtr(ip: InternPool, i: Index) Index { +pub fn slicePtr(ip: *const InternPool, i: Index) Index { const item = ip.items.get(@enumToInt(i)); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, @@ -4519,7 +4519,7 @@ pub fn slicePtr(ip: InternPool, i: Index) Index { } /// Given a slice value, returns the value of the len field. -pub fn sliceLen(ip: InternPool, i: Index) Index { +pub fn sliceLen(ip: *const InternPool, i: Index) Index { const item = ip.items.get(@enumToInt(i)); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).len, @@ -4702,7 +4702,7 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } }); } -pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex { +pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .type_struct) return .none; @@ -4710,7 +4710,7 @@ pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { +pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); switch (tags[@enumToInt(val)]) { @@ -4721,7 +4721,7 @@ pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { +pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { assert(val != .none); const tags = ip.items.items(.tag); const datas = ip.items.items(.data); @@ -4731,7 +4731,7 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { } } -pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex { +pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .func) return .none; @@ -4739,7 +4739,7 @@ pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex { return ip.extraData(Key.Func, 
datas[@enumToInt(val)]).index.toOptional(); } -pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { +pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .type_inferred_error_set) return .none; @@ -4748,7 +4748,7 @@ pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.Inferre } /// includes .comptime_int_type -pub fn isIntegerType(ip: InternPool, ty: Index) bool { +pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { return switch (ty) { .usize_type, .isize_type, @@ -4769,7 +4769,7 @@ pub fn isIntegerType(ip: InternPool, ty: Index) bool { } /// does not include .enum_literal_type -pub fn isEnumType(ip: InternPool, ty: Index) bool { +pub fn isEnumType(ip: *const InternPool, ty: Index) bool { return switch (ty) { .atomic_order_type, .atomic_rmw_op_type, @@ -4783,35 +4783,35 @@ pub fn isEnumType(ip: InternPool, ty: Index) bool { }; } -pub fn isFunctionType(ip: InternPool, ty: Index) bool { +pub fn isFunctionType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .func_type; } -pub fn isPointerType(ip: InternPool, ty: Index) bool { +pub fn isPointerType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .ptr_type; } -pub fn isOptionalType(ip: InternPool, ty: Index) bool { +pub fn isOptionalType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .opt_type; } /// includes .inferred_error_set_type -pub fn isErrorSetType(ip: InternPool, ty: Index) bool { +pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool { return ty == .anyerror_type or switch (ip.indexToKey(ty)) { .error_set_type, .inferred_error_set_type => true, else => false, }; } -pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool { +pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .inferred_error_set_type; } -pub fn isErrorUnionType(ip: InternPool, ty: Index) bool { +pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .error_union_type; } -pub fn isAggregateType(ip: InternPool, ty: Index) bool { +pub fn isAggregateType(ip: *const InternPool, ty: Index) bool { return switch (ip.indexToKey(ty)) { .array_type, .vector_type, .anon_struct_type, .struct_type => true, else => false, @@ -4827,11 +4827,11 @@ pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index); } -pub fn dump(ip: InternPool) void { +pub fn dump(ip: *const InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } -fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { +fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { const items_size = (1 + 4) * ip.items.len; const extra_size = 4 * ip.extra.items.len; const limbs_size = 8 * ip.limbs.items.len; @@ -5023,11 +5023,11 @@ pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct { return ip.allocated_structs.at(@enumToInt(index)); } -pub fn structPtrConst(ip: InternPool, index: Module.Struct.Index) *const Module.Struct { +pub fn structPtrConst(ip: *const InternPool, index: Module.Struct.Index) *const Module.Struct { return ip.allocated_structs.at(@enumToInt(index)); } -pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) ?*const 
Module.Struct { +pub fn structPtrUnwrapConst(ip: *const InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct { return structPtrConst(ip, index.unwrap() orelse return null); } @@ -5035,7 +5035,7 @@ pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } -pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Union { +pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } @@ -5043,7 +5043,7 @@ pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn { return ip.allocated_funcs.at(@enumToInt(index)); } -pub fn funcPtrConst(ip: InternPool, index: Module.Fn.Index) *const Module.Fn { +pub fn funcPtrConst(ip: *const InternPool, index: Module.Fn.Index) *const Module.Fn { return ip.allocated_funcs.at(@enumToInt(index)); } @@ -5051,7 +5051,7 @@ pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.In return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } -pub fn inferredErrorSetPtrConst(ip: InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { +pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } @@ -5182,7 +5182,7 @@ pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { } } -pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { +pub fn stringToSlice(ip: *const InternPool, s: NullTerminatedString) [:0]const u8 { const string_bytes = ip.string_bytes.items; const start = @enumToInt(s); var end: usize = start; @@ -5190,11 +5190,11 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { return string_bytes[start..end :0]; } -pub fn stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { +pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { return ip.stringToSlice(s.unwrap() orelse return null); } -pub fn typeOf(ip: InternPool, index: Index) Index { +pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization of static keys is required so that typeOf can be called // on static keys that haven't been added yet during static key initialization. // An alternative would be to topological sort the static keys, but this would @@ -5382,12 +5382,12 @@ pub fn typeOf(ip: InternPool, index: Index) Index { } /// Assumes that the enum's field indexes equal its value tags. 
-pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E { +pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { const int = ip.indexToKey(i).enum_tag.int; return @intToEnum(E, ip.indexToKey(int).int.storage.u64); } -pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { +pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, @@ -5397,7 +5397,7 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { }; } -pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 { +pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, @@ -5407,7 +5407,7 @@ pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 { }; } -pub fn isNoReturn(ip: InternPool, ty: Index) bool { +pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, else => switch (ip.indexToKey(ty)) { @@ -5420,7 +5420,7 @@ pub fn isNoReturn(ip: InternPool, ty: Index) bool { /// This is a particularly hot function, so we operate directly on encodings /// rather than the more straightforward implementation of calling `indexToKey`. -pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { +pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { return switch (index) { .u1_type, .u8_type, diff --git a/src/Liveness.zig b/src/Liveness.zig index 4f3d87d3c200..b12b6382082e 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -225,7 +225,7 @@ pub fn categorizeOperand( air: Air, inst: Air.Inst.Index, operand: Air.Inst.Index, - ip: InternPool, + ip: *const InternPool, ) OperandCategory { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); @@ -1139,7 +1139,7 @@ fn analyzeInst( .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { @@ -1291,7 +1291,7 @@ fn analyzeOperands( // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. - if (!immediate_death or a.air.mustLower(inst, ip.*)) { + if (!immediate_death or a.air.mustLower(inst, ip)) { // Note that it's important we iterate over the operands backwards, so that if a dying // operand is used multiple times we mark its last use as its death. var i = operands.len; @@ -1837,7 +1837,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. 
- if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip.*)) return; + if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index e8b024eb6f0d..a5fc59289422 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -32,7 +32,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { const tag = self.air.instructions.items(.tag); const data = self.air.instructions.items(.data); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) { // This instruction will not be lowered and should be ignored. continue; } @@ -325,7 +325,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); var bt = self.liveness.iterateBigTomb(inst); diff --git a/src/Module.zig b/src/Module.zig index ffc6a95fe17f..b1a74932d33f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6726,7 +6726,7 @@ pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { } pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { - const info = Type.ptrInfoIp(mod.intern_pool, ptr_ty.toIntern()); + const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern()); return mod.ptrType(.{ .elem_type = new_child.toIntern(), diff --git a/src/Sema.zig b/src/Sema.zig index 8836e8952821..b4e07d749ee1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33624,7 +33624,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { /// Returns the type of the AIR instruction. 
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst, sema.mod.intern_pool); + return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool); } pub fn getTmpAir(sema: Sema) Air { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 5874440e504c..d01a93dd0d31 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -660,7 +660,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -6412,10 +6412,10 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 360f52cb30b7..69a156999b3a 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -644,7 +644,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -6317,10 +6317,10 @@ fn parseRegName(name: []const u8) ?Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5417650dd58c..809c388532d8 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -478,7 +478,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -2737,10 +2737,10 @@ fn parseRegName(name: []const u8) ?Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 3bcdd5ad25b3..fde5424ddce0 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -498,7 +498,7 
@@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -4883,10 +4883,10 @@ fn wantSafety(self: *Self) bool { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index af2b37312d15..e397cf29f8e3 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2076,7 +2076,7 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { const ip = &mod.intern_pool; for (body) |inst| { - if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip.*)) { + if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) { continue; } const old_bookkeeping_value = func.air_bookkeeping; @@ -7436,10 +7436,10 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type { const mod = func.bin_file.base.options.module.?; - return func.air.typeOf(inst, mod.intern_pool); + return func.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type { const mod = func.bin_file.base.options.module.?; - return func.air.typeOfIndex(inst, mod.intern_pool); + return func.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index dbb3d977b86c..b9cc3f705218 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1738,7 +1738,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; wip_mir_log.debug("{}", .{self.fmtAir(inst)}); verbose_tracking_log.debug("{}", .{self.fmtTracking()}); @@ -11992,10 +11992,10 @@ fn hasAllFeatures(self: *Self, features: anytype) bool { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d705d6143ef0..0db223c6b6c7 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -489,12 +489,12 @@ pub const Function = struct { fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { const mod = f.object.dg.module; - return f.air.typeOf(inst, mod.intern_pool); + return f.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { const mod = f.object.dg.module; - return f.air.typeOfIndex(inst, mod.intern_pool); + return f.air.typeOfIndex(inst, &mod.intern_pool); } }; @@ -2808,7 +2808,7 
@@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const air_tags = f.air.instructions.items(.tag); for (body) |inst| { - if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip.*)) + if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip)) continue; const result_value = switch (air_tags[inst]) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 606c57b187be..8cf6a51ba106 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1574,7 +1574,7 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. - const ptr_info = Type.ptrInfoIp(mod.intern_pool, ty.toIntern()); + const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern()); if (ptr_info.sentinel != .none or ptr_info.address_space != .generic or @@ -4330,7 +4330,7 @@ pub const FuncGen = struct { const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) |inst, i| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const opt_value: ?*llvm.Value = switch (air_tags[inst]) { @@ -8055,7 +8055,7 @@ pub const FuncGen = struct { const mod = fg.dg.module; const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { - switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip.*)) { + switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) { .none => continue, .write, .noret, .complex => return false, .tomb => return true, @@ -9920,12 +9920,12 @@ pub const FuncGen = struct { fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { const mod = fg.dg.module; - return fg.air.typeOf(inst, mod.intern_pool); + return fg.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { const mod = fg.dg.module; - return fg.air.typeOfIndex(inst, mod.intern_pool); + return fg.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 0fbcb47f7101..ddd7f36435cd 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1688,7 +1688,7 @@ pub const DeclGen = struct { const mod = self.module; const ip = &mod.intern_pool; // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) return; const air_tags = self.air.instructions.items(.tag); @@ -3339,11 +3339,11 @@ pub const DeclGen = struct { fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type { const mod = self.module; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type { const mod = self.module; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/print_air.zig b/src/print_air.zig index be7bc9610d8e..8da80e1360cc 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -978,6 +978,6 @@ const Writer = struct { fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type { const mod = w.module; - return w.air.typeOfIndex(inst, mod.intern_pool); + return w.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/type.zig b/src/type.zig index f285caff95b1..fc7821b50b18 100644 --- a/src/type.zig +++ b/src/type.zig @@ -102,7 +102,7 @@ pub const Type = struct { }; } - pub fn ptrInfoIp(ip: InternPool, ty: 
InternPool.Index) InternPool.Key.PtrType { + pub fn ptrInfoIp(ip: *const InternPool, ty: InternPool.Index) InternPool.Key.PtrType { return switch (ip.indexToKey(ty)) { .ptr_type => |p| p, .opt_type => |child| switch (ip.indexToKey(child)) { @@ -114,7 +114,7 @@ pub const Type = struct { } pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { - return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.toIntern())); + return Payload.Pointer.Data.fromKey(ptrInfoIp(&mod.intern_pool, ty.toIntern())); } pub fn eql(a: Type, b: Type, mod: *const Module) bool { @@ -1832,10 +1832,10 @@ pub const Type = struct { } pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { - return isVolatilePtrIp(ty, mod.intern_pool); + return isVolatilePtrIp(ty, &mod.intern_pool); } - pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool { + pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { return switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| ptr_type.is_volatile, else => false, @@ -1920,10 +1920,10 @@ pub const Type = struct { /// For *T, returns T. /// For [*]T, returns T. pub fn childType(ty: Type, mod: *const Module) Type { - return childTypeIp(ty, mod.intern_pool); + return childTypeIp(ty, &mod.intern_pool); } - pub fn childTypeIp(ty: Type, ip: InternPool) Type { + pub fn childTypeIp(ty: Type, ip: *const InternPool) Type { return ip.childType(ty.toIntern()).toType(); } @@ -2164,10 +2164,10 @@ pub const Type = struct { /// Asserts the type is an array or vector or struct. pub fn arrayLen(ty: Type, mod: *const Module) u64 { - return arrayLenIp(ty, mod.intern_pool); + return arrayLenIp(ty, &mod.intern_pool); } - pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { + pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 { return switch (ip.indexToKey(ty.toIntern())) { .vector_type => |vector_type| vector_type.len, .array_type => |array_type| array_type.len, @@ -2385,10 +2385,10 @@ pub const Type = struct { /// Asserts the type is a function or a function pointer. 
pub fn fnReturnType(ty: Type, mod: *Module) Type { - return fnReturnTypeIp(ty, mod.intern_pool); + return fnReturnTypeIp(ty, &mod.intern_pool); } - pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type { + pub fn fnReturnTypeIp(ty: Type, ip: *const InternPool) Type { return switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type, .func_type => |func_type| func_type.return_type, From c7d65fa3685a5f48cfedaa7a1adf758e1dc6d219 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 30 May 2023 18:26:39 -0700 Subject: [PATCH 140/205] std.hash: add xxhash to benchmark and fix its API --- lib/std/hash/benchmark.zig | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index cf2f18d22f8a..a3fc6c25748f 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -38,6 +38,16 @@ const hashes = [_]Hash{ .name = "wyhash", .init_u64 = 0, }, + Hash{ + .ty = hash.XxHash64, + .name = "xxhash64", + .init_u64 = 0, + }, + Hash{ + .ty = hash.XxHash32, + .name = "xxhash32", + .init_u64 = 0, + }, Hash{ .ty = hash.Fnv1a_64, .name = "fnv1a", From 82f6f164a1af6557451e580dcf3197ad94e5437e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 30 May 2023 20:23:51 -0700 Subject: [PATCH 141/205] InternPool: improve hashing performance MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Key.PtrType is now an extern struct so that hashing it can be done by reinterpreting bytes directly. It also uses the same representation for type_pointer Tag encoding and the Key. Accessing pointer attributes now requires packed struct access, however, many operations are now a copy of a u32 rather than several independent fields. This function moves the top two most used Key variants - pointer types and pointer values - to use a single-shot hash function that branches for small keys instead of calling memcpy. As a result, perf against merge-base went from 1.17x ± 0.04 slower to 1.12x ± 0.04 slower. After the pointer value hashing was changed, total CPU instructions spent in memcpy went from 4.40% to 4.08%, and after additionally improving pointer type hashing, it further decreased to 3.72%. --- src/InternPool.zig | 484 +++++++++++++++++++++++++------------------ src/Module.zig | 69 +++--- src/Sema.zig | 176 +++++++++------- src/codegen.zig | 4 +- src/codegen/c.zig | 10 +- src/codegen/llvm.zig | 64 +++--- src/type.zig | 108 +++++----- src/value.zig | 4 +- 8 files changed, 527 insertions(+), 392 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index ffd72245d5c9..0b98dfcae501 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -249,35 +249,47 @@ pub const Key = union(enum) { } }; - pub const PtrType = struct { - elem_type: Index, + /// Extern layout so it can be hashed with `std.mem.asBytes`. + pub const PtrType = extern struct { + child: Index, sentinel: Index = .none, - /// `none` indicates the ABI alignment of the pointee_type. In this - /// case, this field *must* be set to `none`, otherwise the - /// `InternPool` equality and hashing functions will return incorrect - /// results. - alignment: Alignment = .none, - /// If this is non-zero it means the pointer points to a sub-byte - /// range of data, which is backed by a "host integer" with this - /// number of bytes. - /// When host_size=pointee_abi_size and bit_offset=0, this must be - /// represented with host_size=0 instead. 
- host_size: u16 = 0, - bit_offset: u16 = 0, - vector_index: VectorIndex = .none, - size: std.builtin.Type.Pointer.Size = .One, - is_const: bool = false, - is_volatile: bool = false, - is_allowzero: bool = false, - /// See src/target.zig defaultAddressSpace function for how to obtain - /// an appropriate value for this field. - address_space: std.builtin.AddressSpace = .generic, + flags: Flags = .{}, + packed_offset: PackedOffset = .{ .bit_offset = 0, .host_size = 0 }, pub const VectorIndex = enum(u16) { none = std.math.maxInt(u16), runtime = std.math.maxInt(u16) - 1, _, }; + + pub const Flags = packed struct(u32) { + size: Size = .One, + /// `none` indicates the ABI alignment of the pointee_type. In this + /// case, this field *must* be set to `none`, otherwise the + /// `InternPool` equality and hashing functions will return incorrect + /// results. + alignment: Alignment = .none, + is_const: bool = false, + is_volatile: bool = false, + is_allowzero: bool = false, + /// See src/target.zig defaultAddressSpace function for how to obtain + /// an appropriate value for this field. + address_space: AddressSpace = .generic, + vector_index: VectorIndex = .none, + }; + + pub const PackedOffset = packed struct(u32) { + /// If this is non-zero it means the pointer points to a sub-byte + /// range of data, which is backed by a "host integer" with this + /// number of bytes. + /// When host_size=pointee_abi_size and bit_offset=0, this must be + /// represented with host_size=0 instead. + host_size: u16, + bit_offset: u16, + }; + + pub const Size = std.builtin.Type.Pointer.Size; + pub const AddressSpace = std.builtin.AddressSpace; }; pub const ArrayType = struct { @@ -635,17 +647,13 @@ pub const Key = union(enum) { } pub fn hash64(key: Key, ip: *const InternPool) u64 { - var hasher = std.hash.Wyhash.init(0); - key.hashWithHasher(&hasher, ip); - return hasher.final(); - } - - pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void { + const asBytes = std.mem.asBytes; const KeyTag = @typeInfo(Key).Union.tag_type.?; - std.hash.autoHash(hasher, @as(KeyTag, key)); + const seed = @enumToInt(@as(KeyTag, key)); switch (key) { + .ptr_type => |x| return WyhashKing.hash(seed, asBytes(&x)), + inline .int_type, - .ptr_type, .array_type, .vector_type, .opt_type, @@ -663,73 +671,110 @@ pub const Key = union(enum) { .enum_literal, .enum_tag, .inferred_error_set_type, - => |info| std.hash.autoHash(hasher, info), + => |info| { + var hasher = std.hash.Wyhash.init(seed); + std.hash.autoHash(&hasher, info); + return hasher.final(); + }, - .runtime_value => |runtime_value| std.hash.autoHash(hasher, runtime_value.val), - .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), - .enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl), + .runtime_value => |runtime_value| { + var hasher = std.hash.Wyhash.init(seed); + std.hash.autoHash(&hasher, runtime_value.val); + return hasher.final(); + }, + .opaque_type => |opaque_type| { + var hasher = std.hash.Wyhash.init(seed); + std.hash.autoHash(&hasher, opaque_type.decl); + return hasher.final(); + }, + .enum_type => |enum_type| { + var hasher = std.hash.Wyhash.init(seed); + std.hash.autoHash(&hasher, enum_type.decl); + return hasher.final(); + }, - .variable => |variable| std.hash.autoHash(hasher, variable.decl), + .variable => |variable| { + var hasher = std.hash.Wyhash.init(seed); + std.hash.autoHash(&hasher, variable.decl); + return hasher.final(); + }, .extern_func => |extern_func| { - std.hash.autoHash(hasher, 
extern_func.ty);
-                std.hash.autoHash(hasher, extern_func.decl);
+                var hasher = std.hash.Wyhash.init(seed);
+                std.hash.autoHash(&hasher, extern_func.ty);
+                std.hash.autoHash(&hasher, extern_func.decl);
+                return hasher.final();
             },
             .func => |func| {
-                std.hash.autoHash(hasher, func.ty);
-                std.hash.autoHash(hasher, func.index);
+                var hasher = std.hash.Wyhash.init(seed);
+                std.hash.autoHash(&hasher, func.ty);
+                std.hash.autoHash(&hasher, func.index);
+                return hasher.final();
             },
             .int => |int| {
+                var hasher = std.hash.Wyhash.init(seed);
                 // Canonicalize all integers by converting them to BigIntConst.
                 switch (int.storage) {
                     .u64, .i64, .big_int => {
                         var buffer: Key.Int.Storage.BigIntSpace = undefined;
                         const big_int = int.storage.toBigInt(&buffer);
-                        std.hash.autoHash(hasher, int.ty);
-                        std.hash.autoHash(hasher, big_int.positive);
-                        for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb);
+                        std.hash.autoHash(&hasher, int.ty);
+                        std.hash.autoHash(&hasher, big_int.positive);
+                        for (big_int.limbs) |limb| std.hash.autoHash(&hasher, limb);
                     },
                     .lazy_align, .lazy_size => |lazy_ty| {
                         std.hash.autoHash(
-                            hasher,
+                            &hasher,
                             @as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage),
                         );
-                        std.hash.autoHash(hasher, lazy_ty);
+                        std.hash.autoHash(&hasher, lazy_ty);
                     },
                 }
+                return hasher.final();
             },
             .float => |float| {
-                std.hash.autoHash(hasher, float.ty);
+                var hasher = std.hash.Wyhash.init(seed);
+                std.hash.autoHash(&hasher, float.ty);
                 switch (float.storage) {
                     inline else => |val| std.hash.autoHash(
-                        hasher,
+                        &hasher,
                         @bitCast(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), val),
                     ),
                 }
+                return hasher.final();
             },
             .ptr => |ptr| {
-                std.hash.autoHash(hasher, ptr.ty);
-                std.hash.autoHash(hasher, ptr.len);
                 // Int-to-ptr pointers are hashed separately from decl-referencing pointers.
                 // This is sound due to pointer provenance rules.
-                std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr));
-                switch (ptr.addr) {
-                    .decl => |decl| std.hash.autoHash(hasher, decl),
-                    .mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl),
-                    .int => |int| std.hash.autoHash(hasher, int),
-                    .eu_payload => |eu_payload| std.hash.autoHash(hasher, eu_payload),
-                    .opt_payload => |opt_payload| std.hash.autoHash(hasher, opt_payload),
-                    .comptime_field => |comptime_field| std.hash.autoHash(hasher, comptime_field),
-                    .elem => |elem| std.hash.autoHash(hasher, elem),
-                    .field => |field| std.hash.autoHash(hasher, field),
-                }
+                const addr: @typeInfo(Key.Ptr.Addr).Union.tag_type.?
= ptr.addr; + const seed2 = seed + @enumToInt(addr); + const common = asBytes(&ptr.ty) ++ asBytes(&ptr.len); + return switch (ptr.addr) { + .decl => |x| WyhashKing.hash(seed2, common ++ asBytes(&x)), + + .mut_decl => |x| WyhashKing.hash( + seed2, + asBytes(&x.decl) ++ asBytes(&x.runtime_index), + ), + + .int, .eu_payload, .opt_payload, .comptime_field => |int| WyhashKing.hash( + seed2, + asBytes(&int), + ), + + .elem, .field => |x| WyhashKing.hash( + seed2, + asBytes(&x.base) ++ asBytes(&x.index), + ), + }; }, .aggregate => |aggregate| { - std.hash.autoHash(hasher, aggregate.ty); + var hasher = std.hash.Wyhash.init(seed); + std.hash.autoHash(&hasher, aggregate.ty); const len = ip.aggregateTypeLen(aggregate.ty); const child = switch (ip.indexToKey(aggregate.ty)) { .array_type => |array_type| array_type.child, @@ -741,16 +786,16 @@ pub const Key = union(enum) { if (child == .u8_type) { switch (aggregate.storage) { .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| { - std.hash.autoHash(hasher, KeyTag.int); - std.hash.autoHash(hasher, byte); + std.hash.autoHash(&hasher, KeyTag.int); + std.hash.autoHash(&hasher, byte); }, .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| { const elem_key = ip.indexToKey(elem); - std.hash.autoHash(hasher, @as(KeyTag, elem_key)); + std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); switch (elem_key) { .undef => {}, .int => |int| std.hash.autoHash( - hasher, + &hasher, @intCast(u8, int.storage.u64), ), else => unreachable, @@ -760,11 +805,11 @@ pub const Key = union(enum) { const elem_key = ip.indexToKey(elem); var remaining = len; while (remaining > 0) : (remaining -= 1) { - std.hash.autoHash(hasher, @as(KeyTag, elem_key)); + std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); switch (elem_key) { .undef => {}, .int => |int| std.hash.autoHash( - hasher, + &hasher, @intCast(u8, int.storage.u64), ), else => unreachable, @@ -772,47 +817,60 @@ pub const Key = union(enum) { } }, } - return; + return hasher.final(); } switch (aggregate.storage) { .bytes => unreachable, .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| - std.hash.autoHash(hasher, elem), + std.hash.autoHash(&hasher, elem), .repeated_elem => |elem| { var remaining = len; - while (remaining > 0) : (remaining -= 1) std.hash.autoHash(hasher, elem); + while (remaining > 0) : (remaining -= 1) std.hash.autoHash(&hasher, elem); }, } + return hasher.final(); }, .error_set_type => |error_set_type| { - for (error_set_type.names) |elem| std.hash.autoHash(hasher, elem); + var hasher = std.hash.Wyhash.init(seed); + for (error_set_type.names) |elem| std.hash.autoHash(&hasher, elem); + return hasher.final(); }, .anon_struct_type => |anon_struct_type| { - for (anon_struct_type.types) |elem| std.hash.autoHash(hasher, elem); - for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem); - for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem); + var hasher = std.hash.Wyhash.init(seed); + for (anon_struct_type.types) |elem| std.hash.autoHash(&hasher, elem); + for (anon_struct_type.values) |elem| std.hash.autoHash(&hasher, elem); + for (anon_struct_type.names) |elem| std.hash.autoHash(&hasher, elem); + return hasher.final(); }, .func_type => |func_type| { - for (func_type.param_types) |param_type| std.hash.autoHash(hasher, param_type); - std.hash.autoHash(hasher, func_type.return_type); - std.hash.autoHash(hasher, func_type.comptime_bits); - std.hash.autoHash(hasher, func_type.noalias_bits); - std.hash.autoHash(hasher, func_type.alignment); - std.hash.autoHash(hasher, 
func_type.cc); - std.hash.autoHash(hasher, func_type.is_var_args); - std.hash.autoHash(hasher, func_type.is_generic); - std.hash.autoHash(hasher, func_type.is_noinline); + var hasher = std.hash.Wyhash.init(seed); + for (func_type.param_types) |param_type| std.hash.autoHash(&hasher, param_type); + std.hash.autoHash(&hasher, func_type.return_type); + std.hash.autoHash(&hasher, func_type.comptime_bits); + std.hash.autoHash(&hasher, func_type.noalias_bits); + std.hash.autoHash(&hasher, func_type.alignment); + std.hash.autoHash(&hasher, func_type.cc); + std.hash.autoHash(&hasher, func_type.is_var_args); + std.hash.autoHash(&hasher, func_type.is_generic); + std.hash.autoHash(&hasher, func_type.is_noinline); + return hasher.final(); }, - .memoized_decl => |memoized_decl| std.hash.autoHash(hasher, memoized_decl.val), + .memoized_decl => |memoized_decl| { + var hasher = std.hash.Wyhash.init(seed); + std.hash.autoHash(&hasher, memoized_decl.val); + return hasher.final(); + }, .memoized_call => |memoized_call| { - std.hash.autoHash(hasher, memoized_call.func); - for (memoized_call.arg_values) |arg| std.hash.autoHash(hasher, arg); + var hasher = std.hash.Wyhash.init(seed); + std.hash.autoHash(&hasher, memoized_call.func); + for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg); + return hasher.final(); }, } } @@ -1340,7 +1398,7 @@ pub const Index = enum(u32) { type_array_big: struct { data: *Array }, type_array_small: struct { data: *Vector }, type_vector: struct { data: *Vector }, - type_pointer: struct { data: *Pointer }, + type_pointer: struct { data: *Tag.TypePointer }, type_slice: DataIsIndex, type_optional: DataIsIndex, type_anyframe: DataIsIndex, @@ -1564,44 +1622,56 @@ pub const static_keys = [_]Key{ .{ .simple_type = .type_info }, .{ .ptr_type = .{ - .elem_type = .u8_type, - .size = .Many, + .child = .u8_type, + .flags = .{ + .size = .Many, + }, } }, // manyptr_const_u8_type .{ .ptr_type = .{ - .elem_type = .u8_type, - .size = .Many, - .is_const = true, + .child = .u8_type, + .flags = .{ + .size = .Many, + .is_const = true, + }, } }, // manyptr_const_u8_sentinel_0_type .{ .ptr_type = .{ - .elem_type = .u8_type, + .child = .u8_type, .sentinel = .zero_u8, - .size = .Many, - .is_const = true, + .flags = .{ + .size = .Many, + .is_const = true, + }, } }, .{ .ptr_type = .{ - .elem_type = .comptime_int_type, - .size = .One, - .is_const = true, + .child = .comptime_int_type, + .flags = .{ + .size = .One, + .is_const = true, + }, } }, // slice_const_u8_type .{ .ptr_type = .{ - .elem_type = .u8_type, - .size = .Slice, - .is_const = true, + .child = .u8_type, + .flags = .{ + .size = .Slice, + .is_const = true, + }, } }, // slice_const_u8_sentinel_0_type .{ .ptr_type = .{ - .elem_type = .u8_type, + .child = .u8_type, .sentinel = .zero_u8, - .size = .Slice, - .is_const = true, + .flags = .{ + .size = .Slice, + .is_const = true, + }, } }, // anyerror_void_error_union_type @@ -1702,7 +1772,6 @@ pub const Tag = enum(u8) { /// data is payload to Vector. type_vector, /// A fully explicitly specified pointer type. - /// data is payload to Pointer. type_pointer, /// A slice type. /// data is Index of underlying pointer type. 
@@ -1941,6 +2010,7 @@ pub const Tag = enum(u8) { const Func = Key.Func; const Union = Key.Union; const MemoizedDecl = Key.MemoizedDecl; + const TypePointer = Key.PtrType; fn Payload(comptime tag: Tag) type { return switch (tag) { @@ -1949,7 +2019,7 @@ pub const Tag = enum(u8) { .type_array_big => Array, .type_array_small => Vector, .type_vector => Vector, - .type_pointer => Pointer, + .type_pointer => TypePointer, .type_slice => unreachable, .type_optional => unreachable, .type_anyframe => unreachable, @@ -2167,32 +2237,6 @@ pub const SimpleValue = enum(u32) { generic_poison, }; -pub const Pointer = struct { - child: Index, - sentinel: Index, - flags: Flags, - packed_offset: PackedOffset, - - pub const Flags = packed struct(u32) { - size: Size, - alignment: Alignment, - is_const: bool, - is_volatile: bool, - is_allowzero: bool, - address_space: AddressSpace, - vector_index: VectorIndex, - }; - - pub const PackedOffset = packed struct(u32) { - host_size: u16, - bit_offset: u16, - }; - - pub const Size = std.builtin.Type.Pointer.Size; - pub const AddressSpace = std.builtin.AddressSpace; - pub const VectorIndex = Key.PtrType.VectorIndex; -}; - /// Stored as a power-of-two, with one special value to indicate none. pub const Alignment = enum(u6) { none = std.math.maxInt(u6), @@ -2531,39 +2575,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, - .type_pointer => { - const ptr_info = ip.extraData(Pointer, data); - return .{ .ptr_type = .{ - .elem_type = ptr_info.child, - .sentinel = ptr_info.sentinel, - .alignment = ptr_info.flags.alignment, - .size = ptr_info.flags.size, - .is_const = ptr_info.flags.is_const, - .is_volatile = ptr_info.flags.is_volatile, - .is_allowzero = ptr_info.flags.is_allowzero, - .address_space = ptr_info.flags.address_space, - .vector_index = ptr_info.flags.vector_index, - .host_size = ptr_info.packed_offset.host_size, - .bit_offset = ptr_info.packed_offset.bit_offset, - } }; - }, + .type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) }, .type_slice => { assert(ip.items.items(.tag)[data] == .type_pointer); - const ptr_info = ip.extraData(Pointer, ip.items.items(.data)[data]); - return .{ .ptr_type = .{ - .elem_type = ptr_info.child, - .sentinel = ptr_info.sentinel, - .alignment = ptr_info.flags.alignment, - .size = .Slice, - .is_const = ptr_info.flags.is_const, - .is_volatile = ptr_info.flags.is_volatile, - .is_allowzero = ptr_info.flags.is_allowzero, - .address_space = ptr_info.flags.address_space, - .vector_index = ptr_info.flags.vector_index, - .host_size = ptr_info.packed_offset.host_size, - .bit_offset = ptr_info.packed_offset.bit_offset, - } }; + var ptr_info = ip.extraData(Tag.TypePointer, ip.items.items(.data)[data]); + ptr_info.flags.size = .Slice; + return .{ .ptr_type = ptr_info }; }, .type_optional => .{ .opt_type = @intToEnum(Index, data) }, @@ -3066,13 +3084,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, .ptr_type => |ptr_type| { - assert(ptr_type.elem_type != .none); - assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.elem_type); + assert(ptr_type.child != .none); + assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.child); - if (ptr_type.size == .Slice) { + if (ptr_type.flags.size == .Slice) { _ = ip.map.pop(); var new_key = key; - new_key.ptr_type.size = .Many; + new_key.ptr_type.flags.size = .Many; const ptr_type_index = try ip.get(gpa, new_key); assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); 
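            // At this point the map slot for the slice key has been re-reserved:
            // it was popped above so that the recursive get() could intern the
            // underlying many-pointer type first.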
try ip.items.ensureUnusedCapacity(gpa, 1); @@ -3083,27 +3101,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } - const is_allowzero = ptr_type.is_allowzero or ptr_type.size == .C; + var ptr_type_adjusted = ptr_type; + if (ptr_type.flags.size == .C) ptr_type_adjusted.flags.is_allowzero = true; ip.items.appendAssumeCapacity(.{ .tag = .type_pointer, - .data = try ip.addExtra(gpa, Pointer{ - .child = ptr_type.elem_type, - .sentinel = ptr_type.sentinel, - .flags = .{ - .alignment = ptr_type.alignment, - .is_const = ptr_type.is_const, - .is_volatile = ptr_type.is_volatile, - .is_allowzero = is_allowzero, - .size = ptr_type.size, - .address_space = ptr_type.address_space, - .vector_index = ptr_type.vector_index, - }, - .packed_offset = .{ - .host_size = ptr_type.host_size, - .bit_offset = ptr_type.bit_offset, - }, - }), + .data = try ip.addExtra(gpa, ptr_type_adjusted), }); }, .array_type => |array_type| { @@ -3379,7 +3382,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const ptr_type = ip.indexToKey(ptr.ty).ptr_type; switch (ptr.len) { .none => { - assert(ptr_type.size != .Slice); + assert(ptr_type.flags.size != .Slice); switch (ptr.addr) { .decl => |decl| ip.items.appendAssumeCapacity(.{ .tag = .ptr_decl, @@ -3410,10 +3413,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { switch (ptr.addr) { .int => assert(ip.typeOf(base) == .usize_type), .eu_payload => assert(ip.indexToKey( - ip.indexToKey(ip.typeOf(base)).ptr_type.elem_type, + ip.indexToKey(ip.typeOf(base)).ptr_type.child, ) == .error_union_type), .opt_payload => assert(ip.indexToKey( - ip.indexToKey(ip.typeOf(base)).ptr_type.elem_type, + ip.indexToKey(ip.typeOf(base)).ptr_type.child, ) == .opt_type), else => unreachable, } @@ -3433,10 +3436,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .elem, .field => |base_index| { const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; switch (ptr.addr) { - .elem => assert(base_ptr_type.size == .Many), + .elem => assert(base_ptr_type.flags.size == .Many), .field => { - assert(base_ptr_type.size == .One); - switch (ip.indexToKey(base_ptr_type.elem_type)) { + assert(base_ptr_type.flags.size == .One); + switch (ip.indexToKey(base_ptr_type.child)) { .anon_struct_type => |anon_struct_type| { assert(ptr.addr == .field); assert(base_index.index < anon_struct_type.types.len); @@ -3451,7 +3454,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .ptr_type => |slice_type| { assert(ptr.addr == .field); - assert(slice_type.size == .Slice); + assert(slice_type.flags.size == .Slice); assert(base_index.index < 2); }, else => unreachable, @@ -3485,12 +3488,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { // TODO: change Key.Ptr for slices to reference the manyptr value // rather than having an addr field directly. Then we can avoid // these problematic calls to pop(), get(), and getOrPutAdapted(). 
- assert(ptr_type.size == .Slice); + assert(ptr_type.flags.size == .Slice); _ = ip.map.pop(); var new_key = key; new_key.ptr.ty = ip.slicePtrType(ptr.ty); new_key.ptr.len = .none; - assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many); + assert(ip.indexToKey(new_key.ptr.ty).ptr_type.flags.size == .Many); const ptr_index = try ip.get(gpa, new_key); assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); @@ -4302,10 +4305,10 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { NullTerminatedString => @enumToInt(@field(extra, field.name)), OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - Pointer.Flags => @bitCast(u32, @field(extra, field.name)), + Tag.TypePointer.Flags => @bitCast(u32, @field(extra, field.name)), TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), - Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), - Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), + Tag.TypePointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), + Tag.TypePointer.VectorIndex => @enumToInt(@field(extra, field.name)), Tag.Variable.Flags => @bitCast(u32, @field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), }); @@ -4370,10 +4373,10 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct NullTerminatedString => @intToEnum(NullTerminatedString, int32), OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32), i32 => @bitCast(i32, int32), - Pointer.Flags => @bitCast(Pointer.Flags, int32), + Tag.TypePointer.Flags => @bitCast(Tag.TypePointer.Flags, int32), TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), - Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), - Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), + Tag.TypePointer.PackedOffset => @bitCast(Tag.TypePointer.PackedOffset, int32), + Tag.TypePointer.VectorIndex => @intToEnum(Tag.TypePointer.VectorIndex, int32), Tag.Variable.Flags => @bitCast(Tag.Variable.Flags, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), }; @@ -4487,7 +4490,7 @@ test "basic usage" { pub fn childType(ip: *const InternPool, i: Index) Index { return switch (ip.indexToKey(i)) { - .ptr_type => |ptr_type| ptr_type.elem_type, + .ptr_type => |ptr_type| ptr_type.child, .vector_type => |vector_type| vector_type.child, .array_type => |array_type| array_type.child, .opt_type, .anyframe_type => |child| child, @@ -4559,7 +4562,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al return ip.get(gpa, .{ .ptr = .{ .ty = new_ty, .addr = .{ .int = .zero_usize }, - .len = switch (ip.indexToKey(new_ty).ptr_type.size) { + .len = switch (ip.indexToKey(new_ty).ptr_type.flags.size) { .One, .Many, .C => .none, .Slice => try ip.get(gpa, .{ .undef = .usize_type }), }, @@ -4623,7 +4626,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .none => try ip.get(gpa, .{ .ptr = .{ .ty = new_ty, .addr = .{ .int = .zero_usize }, - .len = switch (ip.indexToKey(new_ty).ptr_type.size) { + .len = switch (ip.indexToKey(new_ty).ptr_type.flags.size) { .One, .Many, .C => .none, .Slice => try ip.get(gpa, .{ .undef = .usize_type }), }, @@ -4889,7 +4892,7 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .type_array_small => @sizeOf(Vector), .type_array_big => @sizeOf(Array), .type_vector => 
@sizeOf(Vector), - .type_pointer => @sizeOf(Pointer), + .type_pointer => @sizeOf(Tag.TypePointer), .type_slice => 0, .type_optional => 0, .type_anyframe => 0, @@ -5007,6 +5010,7 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { const values = ctx.map.values(); return values[a_index].bytes > values[b_index].bytes; + //return values[a_index].count > values[b_index].count; } }; counts.sort(SortContext{ .map = &counts }); @@ -5621,3 +5625,79 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .none => unreachable, // special tag }; } + +/// I got this from King, using this temporarily until std lib hashing can be +/// improved to make stateless hashing performant. Currently the +/// implementations suffer from not special casing small lengths and not taking +/// advantage of comptime-known lengths, both of which this implementation +/// does. +const WyhashKing = struct { + inline fn mum(pair: *[2]u64) void { + const x = @as(u128, pair[0]) *% pair[1]; + pair[0] = @truncate(u64, x); + pair[1] = @truncate(u64, x >> 64); + } + + inline fn mix(a: u64, b: u64) u64 { + var pair = [_]u64{ a, b }; + mum(&pair); + return pair[0] ^ pair[1]; + } + + inline fn read(comptime I: type, in: []const u8) I { + return std.mem.readIntLittle(I, in[0..@sizeOf(I)]); + } + + const secret = [_]u64{ + 0xa0761d6478bd642f, + 0xe7037ed1a0b428db, + 0x8ebc6af09c88c6e3, + 0x589965cc75374cc3, + }; + + fn hash(seed: u64, input: anytype) u64 { + var in: []const u8 = input; + var last = std.mem.zeroes([2]u64); + const starting_len: u64 = input.len; + var state = seed ^ mix(seed ^ secret[0], secret[1]); + + if (in.len <= 16) { + if (in.len >= 4) { + const end = (in.len >> 3) << 2; + last[0] = (@as(u64, read(u32, in)) << 32) | read(u32, in[end..]); + last[1] = (@as(u64, read(u32, in[in.len - 4 ..])) << 32) | read(u32, in[in.len - 4 - end ..]); + } else if (in.len > 0) { + last[0] = (@as(u64, in[0]) << 16) | (@as(u64, in[in.len >> 1]) << 8) | in[in.len - 1]; + } + } else { + large: { + if (in.len <= 48) break :large; + var split = [_]u64{ state, state, state }; + while (true) { + for (&split, 0..) 
|*lane, i| { + const a = read(u64, in[(i * 2) * 8 ..]) ^ secret[i + 1]; + const b = read(u64, in[((i * 2) + 1) * 8 ..]) ^ lane.*; + lane.* = mix(a, b); + } + in = in[48..]; + if (in.len > 48) continue; + state = split[0] ^ (split[1] ^ split[2]); + break :large; + } + } + while (true) { + if (in.len <= 16) break; + state = mix(read(u64, in) ^ secret[1], read(u64, in[8..]) ^ state); + in = in[16..]; + if (in.len <= 16) break; + } + last[0] = read(u64, in[in.len - 16 ..]); + last[1] = read(u64, in[in.len - 8 ..]); + } + + last[0] ^= secret[1]; + last[1] ^= state; + mum(&last); + return mix(last[0] ^ secret[0] ^ starting_len, last[1] ^ secret[1]); + } +}; diff --git a/src/Module.zig b/src/Module.zig index b1a74932d33f..862025d8f907 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6430,8 +6430,10 @@ pub fn populateTestFunctions( // func try mod.intern(.{ .ptr = .{ .ty = try mod.intern(.{ .ptr_type = .{ - .elem_type = test_decl.ty.toIntern(), - .is_const = true, + .child = test_decl.ty.toIntern(), + .flags = .{ + .is_const = true, + }, } }), .addr = .{ .decl = test_decl_index }, } }), @@ -6466,9 +6468,11 @@ pub fn populateTestFunctions( { const new_ty = try mod.ptrType(.{ - .elem_type = test_fn_ty.toIntern(), - .is_const = true, - .size = .Slice, + .child = test_fn_ty.toIntern(), + .flags = .{ + .is_const = true, + .size = .Slice, + }, }); const new_val = decl.val; const new_init = try mod.intern(.{ .ptr = .{ @@ -6681,65 +6685,68 @@ pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error! pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { var canon_info = info; - const have_elem_layout = info.elem_type.toType().layoutIsResolved(mod); + const have_elem_layout = info.child.toType().layoutIsResolved(mod); - if (info.size == .C) canon_info.is_allowzero = true; + if (info.flags.size == .C) canon_info.flags.is_allowzero = true; // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee // type, we change it to 0 here. If this causes an assertion trip because the // pointee type needs to be resolved more, that needs to be done before calling // this ptr() function. - if (info.alignment.toByteUnitsOptional()) |info_align| { - if (have_elem_layout and info_align == info.elem_type.toType().abiAlignment(mod)) { - canon_info.alignment = .none; + if (info.flags.alignment.toByteUnitsOptional()) |info_align| { + if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) { + canon_info.flags.alignment = .none; } } - switch (info.vector_index) { + switch (info.flags.vector_index) { // Canonicalize host_size. If it matches the bit size of the pointee type, // we change it to 0 here. If this causes an assertion trip, the pointee type // needs to be resolved before calling this ptr() function. 
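        // For example, a pointer with host_size = 4 whose pointee has a bit
        // size of 32 carries no sub-byte packing information, so it is
        // canonicalized to host_size = 0.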
- .none => if (have_elem_layout and info.host_size != 0) { - const elem_bit_size = info.elem_type.toType().bitSize(mod); - assert(info.bit_offset + elem_bit_size <= info.host_size * 8); - if (info.host_size * 8 == elem_bit_size) { - canon_info.host_size = 0; + .none => if (have_elem_layout and info.packed_offset.host_size != 0) { + const elem_bit_size = info.child.toType().bitSize(mod); + assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8); + if (info.packed_offset.host_size * 8 == elem_bit_size) { + canon_info.packed_offset.host_size = 0; } }, .runtime => {}, - _ => assert(@enumToInt(info.vector_index) < info.host_size), + _ => assert(@enumToInt(info.flags.vector_index) < info.packed_offset.host_size), } return (try intern(mod, .{ .ptr_type = canon_info })).toType(); } pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.toIntern() }); + return ptrType(mod, .{ .child = child_type.toIntern() }); } pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.toIntern(), .is_const = true }); + return ptrType(mod, .{ + .child = child_type.toIntern(), + .flags = .{ + .is_const = true, + }, + }); } pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.toIntern(), .size = .Many, .is_const = true }); + return ptrType(mod, .{ + .child = child_type.toIntern(), + .flags = .{ + .size = .Many, + .is_const = true, + }, + }); } pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern()); return mod.ptrType(.{ - .elem_type = new_child.toIntern(), - + .child = new_child.toIntern(), .sentinel = info.sentinel, - .alignment = info.alignment, - .host_size = info.host_size, - .bit_offset = info.bit_offset, - .vector_index = info.vector_index, - .size = info.size, - .is_const = info.is_const, - .is_volatile = info.is_volatile, - .is_allowzero = info.is_allowzero, - .address_space = info.address_space, + .flags = info.flags, + .packed_offset = info.packed_offset, }); } diff --git a/src/Sema.zig b/src/Sema.zig index b4e07d749ee1..9c458cc433c9 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2490,9 +2490,11 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const operand = try trash_block.addBitCast(pointee_ty, .void_value); const ptr_ty = try mod.ptrType(.{ - .elem_type = pointee_ty.toIntern(), - .alignment = ia1.alignment, - .address_space = addr_space, + .child = pointee_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = addr_space, + }, }); const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); @@ -2519,9 +2521,11 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE try sema.resolveTypeLayout(pointee_ty); } const ptr_ty = try mod.ptrType(.{ - .elem_type = pointee_ty.toIntern(), - .alignment = alignment, - .address_space = addr_space, + .child = pointee_ty.toIntern(), + .flags = .{ + .alignment = alignment, + .address_space = addr_space, + }, }); try sema.maybeQueueFuncBodyAnalysis(decl_index); return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ @@ -3771,10 +3775,12 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com if (iac.is_const) try decl.intern(mod); const final_elem_ty = decl.ty; const final_ptr_ty = try mod.ptrType(.{ - 
.elem_type = final_elem_ty.toIntern(), - .is_const = false, - .alignment = iac.alignment, - .address_space = target_util.defaultAddressSpace(target, .local), + .child = final_elem_ty.toIntern(), + .flags = .{ + .is_const = false, + .alignment = iac.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); try sema.maybeQueueFuncBodyAnalysis(decl_index); @@ -3797,9 +3803,11 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); const final_ptr_ty = try mod.ptrType(.{ - .elem_type = final_elem_ty.toIntern(), - .alignment = ia1.alignment, - .address_space = target_util.defaultAddressSpace(target, .local), + .child = final_elem_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); if (!ia1.is_const) { @@ -3916,9 +3924,11 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com defer trash_block.instructions.deinit(gpa); const mut_final_ptr_ty = try mod.ptrType(.{ - .elem_type = final_elem_ty.toIntern(), - .alignment = ia1.alignment, - .address_space = target_util.defaultAddressSpace(target, .local), + .child = final_elem_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty); const empty_trash_count = trash_block.instructions.items.len; @@ -12038,7 +12048,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const has_field = hf: { switch (ip.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice => { if (mem.eql(u8, field_name, "ptr")) break :hf true; if (mem.eql(u8, field_name, "len")) break :hf true; @@ -16019,9 +16029,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = param_info_ty.toIntern(), - .size = .Slice, - .is_const = true, + .child = param_info_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, })).toIntern(), .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(), @@ -16329,9 +16341,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our ?[]const Error value const slice_errors_ty = try mod.ptrType(.{ - .elem_type = error_field_ty.toIntern(), - .size = .Slice, - .is_const = true, + .child = error_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, }); const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern()); const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { @@ -16471,9 +16485,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = enum_field_ty.toIntern(), - .size = .Slice, - .is_const = true, + .child = enum_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, })).toIntern(), .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(), @@ -16614,9 +16630,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try mod.intern(.{ .ptr = .{ .ty = (try 
mod.ptrType(.{ - .elem_type = union_field_ty.toIntern(), - .size = .Slice, - .is_const = true, + .child = union_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, })).toIntern(), .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(), @@ -16833,9 +16851,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = struct_field_ty.toIntern(), - .size = .Slice, - .is_const = true, + .child = struct_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, })).toIntern(), .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(), @@ -16976,9 +16996,11 @@ fn typeInfoDecls( ); return try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = declaration_ty.toIntern(), - .size = .Slice, - .is_const = true, + .child = declaration_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, })).toIntern(), .addr = .{ .decl = new_decl }, .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(), @@ -18047,16 +18069,20 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } const ty = try mod.ptrType(.{ - .elem_type = elem_ty.toIntern(), + .child = elem_ty.toIntern(), .sentinel = sentinel, - .alignment = abi_align, - .address_space = address_space, - .bit_offset = bit_offset, - .host_size = host_size, - .is_const = !inst_data.flags.is_mutable, - .is_allowzero = inst_data.flags.is_allowzero, - .is_volatile = inst_data.flags.is_volatile, - .size = inst_data.size, + .flags = .{ + .alignment = abi_align, + .address_space = address_space, + .is_const = !inst_data.flags.is_mutable, + .is_allowzero = inst_data.flags.is_allowzero, + .is_volatile = inst_data.flags.is_volatile, + .size = inst_data.size, + }, + .packed_offset = .{ + .bit_offset = bit_offset, + .host_size = host_size, + }, }); return sema.addType(ty); } @@ -19209,14 +19235,16 @@ fn zirReify( } const ty = try mod.ptrType(.{ - .size = ptr_size, - .is_const = is_const_val.toBool(), - .is_volatile = is_volatile_val.toBool(), - .alignment = abi_align, - .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val), - .elem_type = elem_ty.toIntern(), - .is_allowzero = is_allowzero_val.toBool(), + .child = elem_ty.toIntern(), .sentinel = actual_sentinel, + .flags = .{ + .size = ptr_size, + .is_const = is_const_val.toBool(), + .is_volatile = is_volatile_val.toBool(), + .alignment = abi_align, + .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val), + .is_allowzero = is_allowzero_val.toBool(), + }, }); return sema.addType(ty); }, @@ -22714,9 +22742,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty) else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: { var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type; - assert(dest_manyptr_ty_key.size == .One); - dest_manyptr_ty_key.elem_type = dest_elem_ty.toIntern(); - dest_manyptr_ty_key.size = .Many; + assert(dest_manyptr_ty_key.flags.size == .One); + dest_manyptr_ty_key.child = dest_elem_ty.toIntern(); + dest_manyptr_ty_key.flags.size = .Many; break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src); } else new_dest_ptr; @@ -22725,9 +22753,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, 
inst: Zir.Inst.Index) CompileError!void try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty) else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: { var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type; - assert(src_manyptr_ty_key.size == .One); - src_manyptr_ty_key.elem_type = src_elem_ty.toIntern(); - src_manyptr_ty_key.size = .Many; + assert(src_manyptr_ty_key.flags.size == .One); + src_manyptr_ty_key.child = src_elem_ty.toIntern(); + src_manyptr_ty_key.flags.size = .Many; break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(src_manyptr_ty_key), new_src_ptr, src_src); } else new_src_ptr; @@ -24036,8 +24064,10 @@ fn panicWithMsg( const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const target = mod.getTarget(); const ptr_stack_trace_ty = try mod.ptrType(.{ - .elem_type = stack_trace_ty.toIntern(), - .address_space = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic + .child = stack_trace_ty.toIntern(), + .flags = .{ + .address_space = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic + }, }); const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); const null_stack_trace = try sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{ @@ -29630,10 +29660,12 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); const ptr_ty = try mod.ptrType(.{ - .elem_type = decl_tv.ty.toIntern(), - .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), - .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true, - .address_space = decl.@"addrspace", + .child = decl_tv.ty.toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), + .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true, + .address_space = decl.@"addrspace", + }, }); if (analyze_fn_body) { try sema.maybeQueueFuncBodyAnalysis(decl_index); @@ -30025,10 +30057,10 @@ fn analyzeSlice( try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty) else if (array_ty.zigTypeTag(mod) == .Array) ptr: { var manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type; - assert(manyptr_ty_key.elem_type == array_ty.toIntern()); - assert(manyptr_ty_key.size == .One); - manyptr_ty_key.elem_type = elem_ty.toIntern(); - manyptr_ty_key.size = .Many; + assert(manyptr_ty_key.child == array_ty.toIntern()); + assert(manyptr_ty_key.flags.size == .One); + manyptr_ty_key.child = elem_ty.toIntern(); + manyptr_ty_key.flags.size = .Many; break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src); } else ptr_or_slice; @@ -31972,7 +32004,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => false, .ptr_type => |ptr_type| { - const child_ty = ptr_type.elem_type.toType(); + const child_ty = ptr_type.child.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { return mod.typeToFunc(child_ty).?.is_generic; } else { @@ -33917,15 +33949,15 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { const mod = sema.mod; return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| 
switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => ty, .Slice => null, }, .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice, .C => null, .Many, .One => { - if (ptr_type.is_allowzero) return null; + if (ptr_type.flags.is_allowzero) return null; // optionals of zero sized types behave like bools, not pointers const payload_ty = opt_child.toType(); @@ -33956,7 +33988,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => return false, .ptr_type => |ptr_type| { - const child_ty = ptr_type.elem_type.toType(); + const child_ty = ptr_type.child.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { return mod.typeToFunc(child_ty).?.is_generic; } else { diff --git a/src/codegen.zig b/src/codegen.zig index 983d895991f9..7fd432dcebee 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -673,7 +673,7 @@ fn lowerParentPtr( mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), ), .field => |field| { - const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.elem_type; + const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.child; return lowerParentPtr( bin_file, src_loc, @@ -681,7 +681,7 @@ fn lowerParentPtr( code, debug_output, reloc_info.offset(switch (mod.intern_pool.indexToKey(base_type)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => unreachable, .Slice => switch (field.index) { 0 => 0, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 0db223c6b6c7..56f6c669dfe2 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -630,7 +630,7 @@ pub const DeclGen = struct { try writer.writeByte(')'); } try writer.writeAll("&("); - if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.size == .One) + if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One) try writer.writeByte('*'); try dg.renderParentPtr(writer, elem.base, location); try writer.print(")[{d}]", .{elem.index}); @@ -642,7 +642,7 @@ pub const DeclGen = struct { _ = try dg.typeToIndex(base_ty, .complete); const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) { .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(field.index, mod), - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => unreachable, .Slice => switch (field.index) { Value.slice_ptr_index => base_ty.slicePtrFieldType(mod), @@ -6285,8 +6285,10 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { // casted to a regular pointer, otherwise an error like this occurs: // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable const elem_ptr_ty = try mod.ptrType(.{ - .size = .C, - .elem_type = elem_ty.ip_index, + .child = elem_ty.ip_index, + .flags = .{ + .size = .C, + }, }); const index = try f.allocLocal(inst, Type.usize); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8cf6a51ba106..8b78c4067a2e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1577,25 +1577,27 @@ pub const Object = struct { const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern()); if (ptr_info.sentinel != .none or - ptr_info.address_space != 
.generic or - ptr_info.bit_offset != 0 or - ptr_info.host_size != 0 or - ptr_info.vector_index != .none or - ptr_info.is_allowzero or - ptr_info.is_const or - ptr_info.is_volatile or - ptr_info.size == .Many or ptr_info.size == .C or - !ptr_info.elem_type.toType().hasRuntimeBitsIgnoreComptime(mod)) + ptr_info.flags.address_space != .generic or + ptr_info.packed_offset.bit_offset != 0 or + ptr_info.packed_offset.host_size != 0 or + ptr_info.flags.vector_index != .none or + ptr_info.flags.is_allowzero or + ptr_info.flags.is_const or + ptr_info.flags.is_volatile or + ptr_info.flags.size == .Many or ptr_info.flags.size == .C or + !ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod)) { const bland_ptr_ty = try mod.ptrType(.{ - .elem_type = if (!ptr_info.elem_type.toType().hasRuntimeBitsIgnoreComptime(mod)) + .child = if (!ptr_info.child.toType().hasRuntimeBitsIgnoreComptime(mod)) .anyopaque_type else - ptr_info.elem_type, - .alignment = ptr_info.alignment, - .size = switch (ptr_info.size) { - .Many, .C, .One => .One, - .Slice => .Slice, + ptr_info.child, + .flags = .{ + .alignment = ptr_info.flags.alignment, + .size = switch (ptr_info.flags.size) { + .Many, .C, .One => .One, + .Slice => .Slice, + }, }, }); const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve); @@ -1683,7 +1685,7 @@ pub const Object = struct { return full_di_ty; } - const elem_di_ty = try o.lowerDebugType(ptr_info.elem_type.toType(), .fwd); + const elem_di_ty = try o.lowerDebugType(ptr_info.child.toType(), .fwd); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const ptr_di_ty = dib.createPointerType( @@ -5856,8 +5858,10 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = llvm_field.ty.toIntern(), - .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), + .child = llvm_field.ty.toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), + }, }); if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) @@ -6732,8 +6736,10 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = llvm_field.ty.toIntern(), - .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), + .child = llvm_field.ty.toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), + }, }); return self.load(field_ptr, field_ptr_ty); } @@ -9131,10 +9137,12 @@ pub const FuncGen = struct { indices[1] = llvm_u32.constInt(llvm_i, .False); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = self.typeOf(elem).toIntern(), - .alignment = InternPool.Alignment.fromNonzeroByteUnits( - result_ty.structFieldAlign(i, mod), - ), + .child = self.typeOf(elem).toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromNonzeroByteUnits( + result_ty.structFieldAlign(i, mod), + ), + }, }); try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic); } @@ -9160,7 +9168,7 @@ pub const FuncGen = struct { const array_info = result_ty.arrayInfo(mod); const elem_ptr_ty = try mod.ptrType(.{ - .elem_type = 
array_info.elem_type.toIntern(), + .child = array_info.elem_type.toIntern(), }); for (elements, 0..) |elem, i| { @@ -9282,8 +9290,10 @@ pub const FuncGen = struct { const index_type = self.context.intType(32); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = field.ty.toIntern(), - .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align), + .child = field.ty.toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align), + }, }); if (layout.tag_size == 0) { const indices: [3]*llvm.Value = .{ diff --git a/src/type.zig b/src/type.zig index fc7821b50b18..fdac8430f3bc 100644 --- a/src/type.zig +++ b/src/type.zig @@ -85,7 +85,7 @@ pub const Type = struct { /// Asserts the type is a pointer. pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { - return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.is_const; + return !mod.intern_pool.indexToKey(ty.toIntern()).ptr_type.flags.is_const; } pub const ArrayInfo = struct { @@ -488,7 +488,7 @@ pub const Type = struct { // Pointers to zero-bit types still have a runtime address; however, pointers // to comptime-only types do not, with the exception of function pointers. if (ignore_comptime_only) return true; - const child_ty = ptr_type.elem_type.toType(); + const child_ty = ptr_type.child.toType(); if (child_ty.zigTypeTag(mod) == .Fn) return !mod.typeToFunc(child_ty).?.is_generic; if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); return !comptimeOnly(ty, mod); @@ -689,7 +689,7 @@ pub const Type = struct { .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), .opt_type => ty.isPtrLikeOptional(mod), - .ptr_type => |ptr_type| ptr_type.size != .Slice, + .ptr_type => |ptr_type| ptr_type.flags.size != .Slice, .simple_type => |t| switch (t) { .f16, @@ -823,13 +823,13 @@ pub const Type = struct { pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| { - if (ptr_type.alignment.toByteUnitsOptional()) |a| { + if (ptr_type.flags.alignment.toByteUnitsOptional()) |a| { return @intCast(u32, a); } else if (opt_sema) |sema| { - const res = try ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); + const res = try ptr_type.child.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; } else { - return (ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + return (ptr_type.child.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } }, .opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema), @@ -839,8 +839,8 @@ pub const Type = struct { pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.address_space, - .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space, + .ptr_type => |ptr_type| ptr_type.flags.address_space, + .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.flags.address_space, else => unreachable, }; } @@ -1297,7 +1297,7 @@ pub const Type = struct { if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; }, - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice => return .{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, else => return .{ .scalar = 
@divExact(target.ptrBitWidth(), 8) }, }, @@ -1620,7 +1620,7 @@ pub const Type = struct { switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| return int_type.bits, - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice => return target.ptrBitWidth() * 2, else => return target.ptrBitWidth(), }, @@ -1795,7 +1795,7 @@ pub const Type = struct { pub fn isSinglePointer(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_info| ptr_info.size == .One, + .ptr_type => |ptr_info| ptr_info.flags.size == .One, else => false, }; } @@ -1808,14 +1808,14 @@ pub const Type = struct { /// Returns `null` if `ty` is not a pointer. pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_info| ptr_info.size, + .ptr_type => |ptr_info| ptr_info.flags.size, else => null, }; } pub fn isSlice(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.size == .Slice, + .ptr_type => |ptr_type| ptr_type.flags.size == .Slice, else => false, }; } @@ -1826,7 +1826,7 @@ pub const Type = struct { pub fn isConstPtr(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.is_const, + .ptr_type => |ptr_type| ptr_type.flags.is_const, else => false, }; } @@ -1837,14 +1837,14 @@ pub const Type = struct { pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { return switch (ip.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.is_volatile, + .ptr_type => |ptr_type| ptr_type.flags.is_volatile, else => false, }; } pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.is_allowzero, + .ptr_type => |ptr_type| ptr_type.flags.is_allowzero, .opt_type => true, else => false, }; @@ -1852,21 +1852,21 @@ pub const Type = struct { pub fn isCPtr(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.size == .C, + .ptr_type => |ptr_type| ptr_type.flags.size == .C, else => false, }; } pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice => false, .One, .Many, .C => true, }, .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| switch (p.size) { + .ptr_type => |p| switch (p.flags.size) { .Slice, .C => false, - .Many, .One => !p.is_allowzero, + .Many, .One => !p.flags.is_allowzero, }, else => false, }, @@ -1887,14 +1887,14 @@ pub const Type = struct { pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .opt_type => |child_type| switch (mod.intern_pool.indexToKey(child_type)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .C => false, - .Slice, .Many, .One => !ptr_type.is_allowzero, + .Slice, .Many, .One => !ptr_type.flags.is_allowzero, }, .error_set_type => true, else => false, }, - .ptr_type => |ptr_type| ptr_type.size == .C, + .ptr_type => |ptr_type| ptr_type.flags.size == .C, else => false, }; } @@ -1904,11 
+1904,11 @@ pub const Type = struct { /// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ptr_type.size == .C, + .ptr_type => |ptr_type| ptr_type.flags.size == .C, .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice, .C => false, - .Many, .One => !ptr_type.is_allowzero, + .Many, .One => !ptr_type.flags.is_allowzero, }, else => false, }, @@ -1938,9 +1938,9 @@ pub const Type = struct { /// For anyframe->T, returns T. pub fn elemType2(ty: Type, mod: *const Module) Type { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| switch (ptr_type.size) { - .One => ptr_type.elem_type.toType().shallowElemType(mod), - .Many, .C, .Slice => ptr_type.elem_type.toType(), + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One => ptr_type.child.toType().shallowElemType(mod), + .Many, .C, .Slice => ptr_type.child.toType(), }, .anyframe_type => |child| { assert(child != .none); @@ -1974,7 +1974,7 @@ pub const Type = struct { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .opt_type => |child| child.toType(), .ptr_type => |ptr_type| b: { - assert(ptr_type.size == .C); + assert(ptr_type.flags.size == .C); break :b ty; }, else => unreachable, @@ -2390,7 +2390,7 @@ pub const Type = struct { pub fn fnReturnTypeIp(ty: Type, ip: *const InternPool) Type { return switch (ip.indexToKey(ty.toIntern())) { - .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type, + .ptr_type => |ptr_type| ip.indexToKey(ptr_type.child).func_type.return_type, .func_type => |func_type| func_type.return_type, else => unreachable, }.toType(); @@ -2672,7 +2672,7 @@ pub const Type = struct { else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => false, .ptr_type => |ptr_type| { - const child_ty = ptr_type.elem_type.toType(); + const child_ty = ptr_type.child.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { return false; } else { @@ -3374,17 +3374,17 @@ pub const Type = struct { pub fn fromKey(p: InternPool.Key.PtrType) Data { return .{ - .pointee_type = p.elem_type.toType(), + .pointee_type = p.child.toType(), .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null, - .@"align" = @intCast(u32, p.alignment.toByteUnits(0)), - .@"addrspace" = p.address_space, - .bit_offset = p.bit_offset, - .host_size = p.host_size, - .vector_index = p.vector_index, - .@"allowzero" = p.is_allowzero, - .mutable = !p.is_const, - .@"volatile" = p.is_volatile, - .size = p.size, + .@"align" = @intCast(u32, p.flags.alignment.toByteUnits(0)), + .@"addrspace" = p.flags.address_space, + .bit_offset = p.packed_offset.bit_offset, + .host_size = p.packed_offset.host_size, + .vector_index = p.flags.vector_index, + .@"allowzero" = p.flags.is_allowzero, + .mutable = !p.flags.is_const, + .@"volatile" = p.flags.is_volatile, + .size = p.flags.size, }; } }; @@ -3478,17 +3478,21 @@ pub const Type = struct { } return mod.ptrType(.{ - .elem_type = d.pointee_type.ip_index, + .child = d.pointee_type.ip_index, .sentinel = if (d.sentinel) |s| s.ip_index else .none, - .alignment = InternPool.Alignment.fromByteUnits(d.@"align"), - .host_size = d.host_size, - .bit_offset = d.bit_offset, - .vector_index = d.vector_index, - .size = d.size, - .is_const = !d.mutable, - .is_volatile = 
d.@"volatile", - .is_allowzero = d.@"allowzero", - .address_space = d.@"addrspace", + .flags = .{ + .alignment = InternPool.Alignment.fromByteUnits(d.@"align"), + .vector_index = d.vector_index, + .size = d.size, + .is_const = !d.mutable, + .is_volatile = d.@"volatile", + .is_allowzero = d.@"allowzero", + .address_space = d.@"addrspace", + }, + .packed_offset = .{ + .host_size = d.host_size, + .bit_offset = d.bit_offset, + }, }); } diff --git a/src/value.zig b/src/value.zig index 92dd3a3c3f2b..fe6a15154cb9 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2080,8 +2080,8 @@ pub const Value = struct { else => val, }; var ptr_ty_key = mod.intern_pool.indexToKey(elem_ptr_ty.toIntern()).ptr_type; - assert(ptr_ty_key.size != .Slice); - ptr_ty_key.size = .Many; + assert(ptr_ty_key.flags.size != .Slice); + ptr_ty_key.flags.size = .Many; return (try mod.intern(.{ .ptr = .{ .ty = elem_ptr_ty.toIntern(), .addr = .{ .elem = .{ From 91fb45a51b3fe29772c71cc353aaf1e07caeb507 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 30 May 2023 21:25:48 -0400 Subject: [PATCH 142/205] Sema: fix comptime error set comparisons --- src/Sema.zig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Sema.zig b/src/Sema.zig index 9c458cc433c9..2ee3404f66a3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -15392,7 +15392,9 @@ fn zirCmpEq( if (lval.isUndef(mod) or rval.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (lval.toIntern() == rval.toIntern()) { + const lkey = mod.intern_pool.indexToKey(lval.toIntern()); + const rkey = mod.intern_pool.indexToKey(rval.toIntern()); + if ((lkey.err.name == rkey.err.name) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; From 1430ac2fbba9c8077b9b164b97010c1bac195ed7 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 30 May 2023 21:37:36 -0400 Subject: [PATCH 143/205] Type: fix `@sizeOf(?anyerror)` --- src/type.zig | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/type.zig b/src/type.zig index fdac8430f3bc..049ca1ebd835 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1886,11 +1886,8 @@ pub const Type = struct { /// See also `isPtrLikeOptional`. 
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .opt_type => |child_type| switch (mod.intern_pool.indexToKey(child_type)) { - .ptr_type => |ptr_type| switch (ptr_type.flags.size) { - .C => false, - .Slice, .Many, .One => !ptr_type.flags.is_allowzero, - }, + .opt_type => |child_type| child_type == .anyerror_type or switch (mod.intern_pool.indexToKey(child_type)) { + .ptr_type => |ptr_type| ptr_type.flags.size != .C and !ptr_type.flags.is_allowzero, .error_set_type => true, else => false, }, From 99531b0d52392668fe9f86b5109fff74cd37aff3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 30 May 2023 23:38:13 -0400 Subject: [PATCH 144/205] Sema: make sentinel load through array pointer comptime known --- src/Sema.zig | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/Sema.zig b/src/Sema.zig index 2ee3404f66a3..23a54da5cad8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -25520,7 +25520,19 @@ fn elemVal( return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .One => { - assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable + const array_ty = indexable_ty.childType(mod); // Guaranteed by checkIndexable + assert(array_ty.zigTypeTag(mod) == .Array); + + if (array_ty.sentinel(mod)) |sentinel| { + // index must be defined since it can access out of bounds + if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| { + const index = @intCast(usize, index_val.toUnsignedInt(mod)); + if (index == array_ty.arrayLen(mod)) { + return sema.addConstant(array_ty.childType(mod), sentinel); + } + } + } + const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety); return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src); }, From a0d4ef0acf50db06fdde8ff229d20d15afc7d402 Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 31 May 2023 04:42:18 +0100 Subject: [PATCH 145/205] InternPool: add representation for value of empty enums and unions This is a bit odd, because this value doesn't actually exist: see #15909. This gets all the empty enum/union behavior tests passing. Also adds an assertion to `Sema.analyzeBodyInner` which would have helped figure out the issue here much more quickly. --- src/InternPool.zig | 24 +++++++++++++++++++++++- src/Sema.zig | 29 +++++++++++++++++++---------- src/TypedValue.zig | 1 + src/arch/wasm/CodeGen.zig | 1 + src/codegen.zig | 1 + src/codegen/c.zig | 1 + src/codegen/llvm.zig | 1 + src/codegen/spirv.zig | 1 + src/type.zig | 19 +++++++++++++++++-- src/value.zig | 1 + 10 files changed, 66 insertions(+), 13 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 0b98dfcae501..9047041db8e3 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -206,6 +206,10 @@ pub const Key = union(enum) { enum_literal: NullTerminatedString, /// A specific enum tag, indicated by the integer tag value. enum_tag: Key.EnumTag, + /// An empty enum or union. TODO: this value's existence is strange, because such a type in + /// reality has no values. See #15909. + /// Payload is the type for which we are an empty value. 
+ empty_enum_value: Index, float: Key.Float, ptr: Ptr, opt: Opt, @@ -670,6 +674,7 @@ pub const Key = union(enum) { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .inferred_error_set_type, => |info| { var hasher = std.hash.Wyhash.init(seed); @@ -957,6 +962,10 @@ pub const Key = union(enum) { const b_info = b.enum_tag; return std.meta.eql(a_info, b_info); }, + .empty_enum_value => |a_info| { + const b_info = b.empty_enum_value; + return a_info == b_info; + }, .variable => |a_info| { const b_info = b.variable; @@ -1192,6 +1201,7 @@ pub const Key = union(enum) { .enum_literal => .enum_literal_type, .undef => |x| x, + .empty_enum_value => |x| x, .simple_value => |s| switch (s) { .undefined => .undefined_type, @@ -1980,6 +1990,7 @@ pub const Tag = enum(u8) { /// The set of values that are encoded this way is: /// * An array or vector which has length 0. /// * A struct which has all fields comptime-known. + /// * An empty enum or union. TODO: this value's existence is strange, because such a type in reality has no values. See #15909 /// data is Index of the type, which is known to be zero bits at runtime. only_possible_value, /// data is extra index to Key.Union. @@ -2952,6 +2963,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, + .type_enum_auto, + .type_enum_explicit, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => .{ .empty_enum_value = ty }, + else => unreachable, }; }, @@ -3755,6 +3773,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, + .empty_enum_value => |enum_or_union_ty| ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(enum_or_union_ty), + }), + .float => |float| { switch (float.ty) { .f16_type => ip.items.appendAssumeCapacity(.{ @@ -5416,7 +5439,6 @@ pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { .noreturn_type => true, else => switch (ip.indexToKey(ty)) { .error_set_type => |error_set_type| error_set_type.names.len == 0, - .enum_type => |enum_type| enum_type.names.len == 0, else => false, }, }; diff --git a/src/Sema.zig b/src/Sema.zig index 23a54da5cad8..c2535eb4e9ed 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1725,8 +1725,12 @@ fn analyzeBodyInner( break :blk Air.Inst.Ref.void_value; }, }; - if (sema.isNoReturn(air_inst)) + if (sema.isNoReturn(air_inst)) { + // We're going to assume that the body itself is noreturn, so let's ensure that now + assert(block.instructions.items.len > 0); + assert(sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1]))); break always_noreturn; + } map.putAssumeCapacity(inst, air_inst); i += 1; }; @@ -32150,6 +32154,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -33015,10 +33020,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } - if (fields_len == 0) { - return; - } - const bits_per_field = 4; const fields_per_u32 = 32 / bits_per_field; const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; @@ -33301,7 +33302,7 @@ fn generateUnionTagTypeNumbered( .decl = new_decl_index, .namespace = .none, .tag_ty = if (enum_field_vals.len == 0) - .noreturn_type + (try mod.intType(.unsigned, 0)).toIntern() else mod.intern_pool.typeOf(enum_field_vals[0]), .names = enum_field_names, @@ -33351,7 +33352,7 @@ fn 
generateUnionTagTypeSimple( .decl = new_decl_index, .namespace = .none, .tag_ty = if (enum_field_names.len == 0) - .noreturn_type + (try mod.intType(.unsigned, 0)).toIntern() else (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), .names = enum_field_names, @@ -33590,7 +33591,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse return null; const fields = union_obj.fields.values(); - if (fields.len == 0) return Value.@"unreachable"; + if (fields.len == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + } const only_field = fields[0]; if (only_field.ty.eql(resolved_ty, sema.mod)) { const msg = try Module.ErrorMsg.create( @@ -33630,7 +33634,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; switch (enum_type.names.len) { - 0 => return Value.@"unreachable", + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + }, 1 => return try mod.getCoerced((if (enum_type.values.len == 0) try mod.intern(.{ .int = .{ .ty = enum_type.tag_ty, @@ -33655,6 +33662,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -34143,6 +34151,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -34848,7 +34857,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { /// Avoids crashing the compiler when asking if inferred allocations are noreturn. 
fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { - if (ref == .noreturn_type) return true; + if (ref == .unreachable_value) return true; if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { .inferred_alloc, .inferred_alloc_comptime => return false, else => {}, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 0128a3cbfb4c..81d25ed98a1e 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -248,6 +248,7 @@ pub fn print( try writer.writeAll(")"); return; }, + .empty_enum_value => return writer.writeAll("(empty enum value)"), .float => |float| switch (float.storage) { inline else => |x| return writer.print("{}", .{x}), }, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index e397cf29f8e3..e92bd8f6769c 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3156,6 +3156,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { .extern_func, .func, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .int => { const int_info = ty.intInfo(mod); diff --git a/src/codegen.zig b/src/codegen.zig index 7fd432dcebee..1470b94f1b86 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -242,6 +242,7 @@ pub fn generateSymbol( .extern_func, .func, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .int => { const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 56f6c669dfe2..eea6e1489686 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -946,6 +946,7 @@ pub const DeclGen = struct { .extern_func, .func, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .int => |int| switch (int.storage) { .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8b78c4067a2e..91dcbe11a5b2 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3246,6 +3246,7 @@ pub const DeclGen = struct { }, .variable, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .extern_func, .func => { const fn_decl_index = switch (val_key) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index ddd7f36435cd..c7bea80eb69f 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -660,6 +660,7 @@ pub const DeclGen = struct { .extern_func, .func, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .int => try self.addInt(ty, val), .err => |err| { diff --git a/src/type.zig b/src/type.zig index 049ca1ebd835..0c4dfb7e7e9f 100644 --- a/src/type.zig +++ b/src/type.zig @@ -439,6 +439,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -655,6 +656,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -764,6 +766,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -1098,6 +1101,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -1515,6 +1519,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -1749,6 +1754,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -2302,6 +2308,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + 
.empty_enum_value, .float, .ptr, .opt, @@ -2584,7 +2591,10 @@ pub const Type = struct { .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; - if (union_obj.fields.count() == 0) return Value.@"unreachable"; + if (union_obj.fields.count() == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + } const only_field = union_obj.fields.values()[0]; const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; const only = try mod.intern(.{ .un = .{ @@ -2613,7 +2623,10 @@ pub const Type = struct { if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; switch (enum_type.names.len) { - 0 => return Value.@"unreachable", + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + }, 1 => { if (enum_type.values.len == 0) { const only = try mod.intern(.{ .enum_tag = .{ @@ -2645,6 +2658,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -2790,6 +2804,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, diff --git a/src/value.zig b/src/value.zig index fe6a15154cb9..89ba1fba6784 100644 --- a/src/value.zig +++ b/src/value.zig @@ -441,6 +441,7 @@ pub const Value = struct { .err, .enum_literal, .enum_tag, + .empty_enum_value, .float, => val, From aed142ebaa65ff3ced948b18c55835b540e1e04a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 30 May 2023 22:29:52 -0700 Subject: [PATCH 146/205] InternPool: further optimize Key hashing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a continuation of 2f24228c758bc8a35d13379703bc1695008212b0. This commit comes with smaller gains, but gains nonetheless. memcpy is showing up as much less interesting in callgrind output for behavior tests. Current status: this branch is 1.15 ± 0.02 times slower than merge-base. --- src/InternPool.zig | 128 ++++++++++++++++++++------------------------- 1 file changed, 56 insertions(+), 72 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 9047041db8e3..4fc7e3f4e750 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -197,7 +197,7 @@ pub const Key = union(enum) { undef: Index, runtime_value: TypeValue, simple_value: SimpleValue, - variable: Key.Variable, + variable: Variable, extern_func: ExternFunc, func: Func, int: Key.Int, @@ -205,19 +205,19 @@ pub const Key = union(enum) { error_union: ErrorUnion, enum_literal: NullTerminatedString, /// A specific enum tag, indicated by the integer tag value. - enum_tag: Key.EnumTag, + enum_tag: EnumTag, /// An empty enum or union. TODO: this value's existence is strange, because such a type in /// reality has no values. See #15909. /// Payload is the type for which we are an empty value. empty_enum_value: Index, - float: Key.Float, + float: Float, ptr: Ptr, opt: Opt, /// An instance of a struct, array, or vector. /// Each element/field stored as an `Index`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, /// so the slice length will be one more than the type's array length. - aggregate: Key.Aggregate, + aggregate: Aggregate, /// An instance of a union. un: Union, @@ -226,14 +226,15 @@ pub const Key = union(enum) { /// A comptime function call with a memoized result. 
memoized_call: Key.MemoizedCall, - pub const TypeValue = struct { + pub const TypeValue = extern struct { ty: Index, val: Index, }; pub const IntType = std.builtin.Type.Int; - pub const ErrorUnionType = struct { + /// Extern for hashing via memory reinterpretation. + pub const ErrorUnionType = extern struct { error_set_type: Index, payload_type: Index, }; @@ -296,25 +297,27 @@ pub const Key = union(enum) { pub const AddressSpace = std.builtin.AddressSpace; }; - pub const ArrayType = struct { + /// Extern so that hashing can be done via memory reinterpreting. + pub const ArrayType = extern struct { len: u64, child: Index, sentinel: Index = .none, }; - pub const VectorType = struct { + /// Extern so that hashing can be done via memory reinterpreting. + pub const VectorType = extern struct { len: u32, child: Index, }; - pub const OpaqueType = struct { + pub const OpaqueType = extern struct { /// The Decl that corresponds to the opaque itself. decl: Module.Decl.Index, /// Represents the declarations inside this opaque. namespace: Module.Namespace.Index, }; - pub const StructType = struct { + pub const StructType = extern struct { /// The `none` tag is used to represent a struct with no fields. index: Module.Struct.OptionalIndex, /// May be `none` if the struct has no declarations. @@ -501,7 +504,8 @@ pub const Key = union(enum) { lib_name: OptionalNullTerminatedString, }; - pub const Func = struct { + /// Extern so it can be hashed by reinterpreting memory. + pub const Func = extern struct { ty: Index, index: Module.Fn.Index, }; @@ -534,7 +538,7 @@ pub const Key = union(enum) { }; }; - pub const Error = struct { + pub const Error = extern struct { ty: Index, name: NullTerminatedString, }; @@ -549,7 +553,7 @@ pub const Key = union(enum) { }; }; - pub const EnumTag = struct { + pub const EnumTag = extern struct { /// The enum type. ty: Index, /// The integer tag value which has the integer tag type of the enum. @@ -600,14 +604,14 @@ pub const Key = union(enum) { }; /// `null` is represented by the `val` field being `none`. - pub const Opt = struct { + pub const Opt = extern struct { /// This is the optional type; not the payload type. ty: Index, /// This could be `none`, indicating the optional is `null`. val: Index, }; - pub const Union = struct { + pub const Union = extern struct { /// This is the union type; not the field type. ty: Index, /// Indicates the active field. 
@@ -654,10 +658,10 @@ pub const Key = union(enum) { const asBytes = std.mem.asBytes; const KeyTag = @typeInfo(Key).Union.tag_type.?; const seed = @enumToInt(@as(KeyTag, key)); - switch (key) { - .ptr_type => |x| return WyhashKing.hash(seed, asBytes(&x)), - - inline .int_type, + return switch (key) { + // TODO: assert no padding in these types + inline .ptr_type, + .func, .array_type, .vector_type, .opt_type, @@ -667,31 +671,26 @@ pub const Key = union(enum) { .simple_value, .opt, .struct_type, - .union_type, - .un, .undef, .err, - .error_union, .enum_literal, .enum_tag, .empty_enum_value, .inferred_error_set_type, - => |info| { - var hasher = std.hash.Wyhash.init(seed); - std.hash.autoHash(&hasher, info); - return hasher.final(); - }, + .un, + => |x| WyhashKing.hash(seed, asBytes(&x)), - .runtime_value => |runtime_value| { - var hasher = std.hash.Wyhash.init(seed); - std.hash.autoHash(&hasher, runtime_value.val); - return hasher.final(); - }, - .opaque_type => |opaque_type| { - var hasher = std.hash.Wyhash.init(seed); - std.hash.autoHash(&hasher, opaque_type.decl); - return hasher.final(); + .int_type => |x| WyhashKing.hash(seed + @enumToInt(x.signedness), asBytes(&x.bits)), + .union_type => |x| WyhashKing.hash(seed + @enumToInt(x.runtime_tag), asBytes(&x.index)), + + .error_union => |x| switch (x.val) { + .err_name => |y| WyhashKing.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)), + .payload => |y| WyhashKing.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)), }, + + .runtime_value => |x| WyhashKing.hash(seed, asBytes(&x.val)), + .opaque_type => |x| WyhashKing.hash(seed, asBytes(&x.decl)), + .enum_type => |enum_type| { var hasher = std.hash.Wyhash.init(seed); std.hash.autoHash(&hasher, enum_type.decl); @@ -703,18 +702,7 @@ pub const Key = union(enum) { std.hash.autoHash(&hasher, variable.decl); return hasher.final(); }, - .extern_func => |extern_func| { - var hasher = std.hash.Wyhash.init(seed); - std.hash.autoHash(&hasher, extern_func.ty); - std.hash.autoHash(&hasher, extern_func.decl); - return hasher.final(); - }, - .func => |func| { - var hasher = std.hash.Wyhash.init(seed); - std.hash.autoHash(&hasher, func.ty); - std.hash.autoHash(&hasher, func.index); - return hasher.final(); - }, + .extern_func => |x| WyhashKing.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)), .int => |int| { var hasher = std.hash.Wyhash.init(seed); @@ -865,11 +853,7 @@ pub const Key = union(enum) { return hasher.final(); }, - .memoized_decl => |memoized_decl| { - var hasher = std.hash.Wyhash.init(seed); - std.hash.autoHash(&hasher, memoized_decl.val); - return hasher.final(); - }, + .memoized_decl => |x| WyhashKing.hash(seed, asBytes(&x.val)), .memoized_call => |memoized_call| { var hasher = std.hash.Wyhash.init(seed); @@ -877,7 +861,7 @@ pub const Key = union(enum) { for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg); return hasher.final(); }, - } + }; } pub fn eql(a: Key, b: Key, ip: *const InternPool) bool { @@ -1474,7 +1458,7 @@ pub const Index = enum(u32) { error_union_error: struct { data: *Key.Error }, error_union_payload: struct { data: *Tag.TypeValue }, enum_literal: struct { data: NullTerminatedString }, - enum_tag: struct { data: *Key.EnumTag }, + enum_tag: struct { data: *Tag.EnumTag }, float_f16: struct { data: f16 }, float_f32: struct { data: f32 }, float_f64: struct { data: *Float64 }, @@ -1491,7 +1475,7 @@ pub const Index = enum(u32) { bytes: struct { data: *Bytes }, aggregate: struct { const @"data.ty.data.len orelse data.ty.data.fields_len" = opaque {}; - data: *Aggregate, + data: 
*Tag.Aggregate,
         @"trailing.element_values.len": *@"data.ty.data.len orelse data.ty.data.fields_len",
         trailing: struct { element_values: []Index },
     },
@@ -1944,7 +1928,7 @@ pub const Tag = enum(u8) {
     /// data is `NullTerminatedString` of the error name.
     enum_literal,
     /// An enum tag value.
-    /// data is extra index of `Key.EnumTag`.
+    /// data is extra index of `EnumTag`.
     enum_tag,
     /// An f16 value.
     /// data is float value bitcasted to u16 and zero-extended.
@@ -2121,6 +2105,14 @@ pub const Tag = enum(u8) {
             _: u28 = 0,
         };
     };
+
+    /// Trailing:
+    /// 0. element: Index for each len
+    /// len is determined by the aggregate type.
+    pub const Aggregate = struct {
+        /// The type of the aggregate.
+        ty: Index,
+    };
 };
 /// Trailing:
@@ -2161,14 +2153,6 @@ pub const Bytes = struct {
     bytes: String,
 };
-/// Trailing:
-/// 0. element: Index for each len
-/// len is determined by the aggregate type.
-pub const Aggregate = struct {
-    /// The type of the aggregate.
-    ty: Index,
-};
-
 pub const Repeated = struct {
     /// The type of the aggregate.
     ty: Index,
@@ -2982,7 +2966,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             } };
         },
         .aggregate => {
-            const extra = ip.extraDataTrail(Aggregate, data);
+            const extra = ip.extraDataTrail(Tag.Aggregate, data);
             const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty));
             const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]);
             return .{ .aggregate = .{
@@ -3014,7 +2998,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
             } };
         },
         .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) },
-        .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) },
+        .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) },
         .memoized_decl => .{ .memoized_decl = ip.extraData(Key.MemoizedDecl, data) },
         .memoized_call => {
@@ -3989,11 +3973,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             try ip.extra.ensureUnusedCapacity(
                 gpa,
-                @typeInfo(Aggregate).Struct.fields.len + len_including_sentinel,
+                @typeInfo(Tag.Aggregate).Struct.fields.len + len_including_sentinel,
             );
             ip.items.appendAssumeCapacity(.{
                 .tag = .aggregate,
-                .data = ip.addExtraAssumeCapacity(Aggregate{
+                .data = ip.addExtraAssumeCapacity(Tag.Aggregate{
                     .ty = aggregate.ty,
                 }),
             });
@@ -4992,7 +4976,7 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
         .error_set_error, .error_union_error => @sizeOf(Key.Error),
         .error_union_payload => @sizeOf(Tag.TypeValue),
         .enum_literal => 0,
-        .enum_tag => @sizeOf(Key.EnumTag),
+        .enum_tag => @sizeOf(Tag.EnumTag),
         .bytes => b: {
             const info = ip.extraData(Bytes, data);
@@ -5001,9 +4985,9 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
         .aggregate => b: {
-            const info = ip.extraData(Aggregate, data);
+            const info = ip.extraData(Tag.Aggregate, data);
             const fields_len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty));
-            break :b @sizeOf(Aggregate) + (@sizeOf(Index) * fields_len);
+            break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len);
         },
         .repeated => @sizeOf(Repeated),

From d019229c2c3432c9053594eb140b255c3be9ebeb Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Wed, 31 May 2023 00:16:37 -0400
Subject: [PATCH 147/205] Sema: avoid invalidated key access

---
 src/Sema.zig | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index c2535eb4e9ed..9af9b6eace42 100644
---
a/src/Sema.zig +++ b/src/Sema.zig @@ -15966,8 +15966,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } })).toValue()), .Fn => { // TODO: look into memoizing this result. - const info = mod.typeToFunc(ty).?; - var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); @@ -15993,8 +15991,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const param_info_decl = mod.declPtr(param_info_decl_index); const param_info_ty = param_info_decl.val.toType(); - const param_vals = try sema.arena.alloc(InternPool.Index, info.param_types.len); - for (param_vals, info.param_types, 0..) |*param_val, param_ty, i| { + const param_vals = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(ty).?.param_types.len); + for (param_vals, 0..) |*param_val, i| { + const info = mod.typeToFunc(ty).?; + const param_ty = info.param_types[i]; const is_generic = param_ty == .generic_poison_type; const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{ .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), @@ -16046,6 +16046,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; + const info = mod.typeToFunc(ty).?; const ret_ty_opt = try mod.intern(.{ .opt = .{ .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), .val = if (info.return_type == .generic_poison_type) .none else info.return_type, From 26fac15f485e89dc7106256e1aa79184c1761efd Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 31 May 2023 00:42:24 -0400 Subject: [PATCH 148/205] math.big.int: fix ctz of zero --- lib/std/math/big/int.zig | 4 ++-- src/value.zig | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 13ead1c42122..ec79d843da30 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -2512,7 +2512,7 @@ pub const Const = struct { return total_limb_lz + bits - total_limb_bits; } - pub fn ctz(a: Const) Limb { + pub fn ctz(a: Const, bits: Limb) Limb { // Limbs are stored in little-endian order. var result: Limb = 0; for (a.limbs) |limb| { @@ -2520,7 +2520,7 @@ pub const Const = struct { result += limb_tz; if (limb_tz != @sizeOf(Limb) * 8) break; } - return result; + return @min(result, bits); } }; diff --git a/src/value.zig b/src/value.zig index 89ba1fba6784..62a83c7901b5 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1216,10 +1216,10 @@ pub const Value = struct { return bigint.clz(ty.intInfo(mod).bits); } - pub fn ctz(val: Value, _: Type, mod: *Module) u64 { + pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { var bigint_buf: BigIntSpace = undefined; const bigint = val.toBigInt(&bigint_buf, mod); - return bigint.ctz(); + return bigint.ctz(ty.intInfo(mod).bits); } pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { From 71c4077c359096d0706a251a10eeae6c41f299ca Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 31 May 2023 01:25:02 -0400 Subject: [PATCH 149/205] Value: fix null test for c pointers --- src/value.zig | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/value.zig b/src/value.zig index 62a83c7901b5..db37d8e9e76f 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2131,19 +2131,20 @@ pub const Value = struct { } /// Asserts the value is not undefined and not unreachable. - /// Integer value 0 is considered null because of C pointers. + /// C pointers with an integer value of 0 are also considered null. 
pub fn isNull(val: Value, mod: *Module) bool { return switch (val.toIntern()) { .undef => unreachable, .unreachable_value => unreachable, - .null_value => true, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => unreachable, - .int => { - var buf: BigIntSpace = undefined; - return val.toBigInt(&buf, mod).eqZero(); + .ptr => |ptr| switch (ptr.addr) { + .int => { + var buf: BigIntSpace = undefined; + return val.toBigInt(&buf, mod).eqZero(); + }, + else => false, }, .opt => |opt| opt.val == .none, else => false, From b2391a7d4425418a29598523dcbdf2bfc9325ecd Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 31 May 2023 02:32:09 -0400 Subject: [PATCH 150/205] Sema: remove opv status from arrays with sentinels Being able to create a pointer to the non-opv sentinel means that these types have to actually be stored. --- src/Sema.zig | 6 ++++-- src/type.zig | 5 +++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 9af9b6eace42..bc34109bc994 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33468,11 +33468,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .inferred_error_set_type, => null, - inline .array_type, .vector_type => |seq_type| { - if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ + inline .array_type, .vector_type => |seq_type, seq_tag| { + const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; + if (seq_type.len + @boolToInt(has_sentinel) == 0) return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue(); + if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), diff --git a/src/type.zig b/src/type.zig index 0c4dfb7e7e9f..bbc2a2ce604c 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2479,8 +2479,9 @@ pub const Type = struct { .inferred_error_set_type, => return null, - inline .array_type, .vector_type => |seq_type| { - if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ + inline .array_type, .vector_type => |seq_type, seq_tag| { + const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; + if (seq_type.len + @boolToInt(has_sentinel) == 0) return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue(); From 870e3843c5736def21234ca8b7159b179985505c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 31 May 2023 13:38:25 -0700 Subject: [PATCH 151/205] Sema: elide comptime-checked slice safety Before, Zig would emit a start<=end safety check for `foo[1..2]` even though it was already checked at compile-time. 
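As a minimal illustration (not from the diff; the function name is made
up), the newly elided check corresponds to code like:

    fn firstTwo(foo: []const u8) []const u8 {
        // Both bounds are comptime-known and 1 <= 2 is verified during
        // semantic analysis, so the start<=end runtime check is elided.
        // The index-in-bounds check against foo.len is a separate safety
        // check and still applies.
        return foo[1..2];
    }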
--- src/Sema.zig | 46 ++++++++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index bc34109bc994..2dba3cac27c7 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -24146,20 +24146,6 @@ fn panicIndexOutOfBounds( try sema.safetyCheckFormatted(parent_block, ok, "panicOutOfBounds", &.{ index, len }); } -fn panicStartGreaterThanEnd( - sema: *Sema, - parent_block: *Block, - start: Air.Inst.Ref, - end: Air.Inst.Ref, -) !void { - assert(!parent_block.is_comptime); - const ok = try parent_block.addBinOp(.cmp_lte, start, end); - if (!sema.mod.comp.formatted_panics) { - return sema.addSafetyCheck(parent_block, ok, .start_index_greater_than_end); - } - try sema.safetyCheckFormatted(parent_block, ok, "panicStartGreaterThanEnd", &.{ start, end }); -} - fn panicInactiveUnionField( sema: *Sema, parent_block: *Block, @@ -30209,11 +30195,12 @@ fn analyzeSlice( }; const slice_sentinel = if (sentinel_opt != .none) sentinel else null; + var checked_start_lte_end = by_length; + var runtime_src: ?LazySrcLoc = null; + // requirement: start <= end - var need_start_gt_end_check = true; if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| { - need_start_gt_end_check = false; if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) { return sema.fail( block, @@ -30225,6 +30212,7 @@ fn analyzeSlice( }, ); } + checked_start_lte_end = true; if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: { const expected_sentinel = sentinel orelse break :sentinel_check; const start_int = start_val.getUnsignedInt(mod).?; @@ -30266,13 +30254,26 @@ fn analyzeSlice( }; return sema.failWithOwnedErrorMsg(msg); } + } else { + runtime_src = ptr_src; } + } else { + runtime_src = start_src; } + } else { + runtime_src = end_src; } - if (!by_length and block.wantSafety() and !block.is_comptime and need_start_gt_end_check) { + if (!checked_start_lte_end and block.wantSafety() and !block.is_comptime) { // requirement: start <= end - try sema.panicStartGreaterThanEnd(block, start, end); + assert(!block.is_comptime); + try sema.requireRuntimeBlock(block, src, runtime_src.?); + const ok = try block.addBinOp(.cmp_lte, start, end); + if (!sema.mod.comp.formatted_panics) { + try sema.addSafetyCheck(block, ok, .start_index_greater_than_end); + } else { + try sema.safetyCheckFormatted(block, ok, "panicStartGreaterThanEnd", &.{ start, end }); + } } const new_len = if (by_length) try sema.coerce(block, Type.usize, uncasted_end_opt, end_src) @@ -30354,14 +30355,7 @@ fn analyzeSlice( .size = .Slice, }); - const runtime_src = if ((try sema.resolveMaybeUndefVal(ptr_or_slice)) == null) - ptr_src - else if ((try sema.resolveMaybeUndefVal(start)) == null) - start_src - else - end_src; - - try sema.requireRuntimeBlock(block, src, runtime_src); + try sema.requireRuntimeBlock(block, src, runtime_src.?); if (block.wantSafety()) { // requirement: slicing C ptr is non-null if (ptr_ptr_child_ty.isCPtr(mod)) { From bb526426e75ed456a7db6afa32447e5a76ac7ca1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 31 May 2023 15:00:48 -0700 Subject: [PATCH 152/205] InternPool: remove memoized_decl This is neither a type nor a value. Simplifies `addStrLit` as well as the many places that switch on `InternPool.Key`. This is a partial revert of bec29b9e498e08202679aa29a45dab2a06a69a1e. 
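In its place, Module gains a plain `memoized_decls` hash map keyed by the
interned value. A simplified sketch of the lookup pattern `addStrLit` now
uses (error handling and the errdefer/abort path elided; see the hunk
below for the real code):

    const gop = try mod.memoized_decls.getOrPut(gpa, val);
    if (!gop.found_existing) {
        // First sighting of this string value: create and memoize the decl.
        gop.value_ptr.* = try mod.createAnonymousDecl(block, .{
            .ty = ty,
            .val = val.toValue(),
        });
        try mod.finalizeAnonDecl(gop.value_ptr.*);
    }
    return sema.analyzeDeclRef(gop.value_ptr.*);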
--- src/InternPool.zig | 36 +---------------------------- src/Module.zig | 5 ++++ src/Sema.zig | 48 ++++++++++++++++----------------------- src/TypedValue.zig | 4 +--- src/arch/wasm/CodeGen.zig | 4 +--- src/codegen.zig | 4 +--- src/codegen/c.zig | 1 - src/codegen/llvm.zig | 4 +--- src/codegen/spirv.zig | 4 +--- src/type.zig | 9 -------- src/value.zig | 4 +--- 11 files changed, 31 insertions(+), 92 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 4fc7e3f4e750..59ff9e405b47 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -221,8 +221,6 @@ pub const Key = union(enum) { /// An instance of a union. un: Union, - /// A declaration with a memoized value. - memoized_decl: MemoizedDecl, /// A comptime function call with a memoized result. memoized_call: Key.MemoizedCall, @@ -639,11 +637,6 @@ pub const Key = union(enum) { }; }; - pub const MemoizedDecl = struct { - val: Index, - decl: Module.Decl.Index, - }; - pub const MemoizedCall = struct { func: Module.Fn.Index, arg_values: []const Index, @@ -853,8 +846,6 @@ pub const Key = union(enum) { return hasher.final(); }, - .memoized_decl => |x| WyhashKing.hash(seed, asBytes(&x.val)), - .memoized_call => |memoized_call| { var hasher = std.hash.Wyhash.init(seed); std.hash.autoHash(&hasher, memoized_call.func); @@ -1134,11 +1125,6 @@ pub const Key = union(enum) { a_info.is_noinline == b_info.is_noinline; }, - .memoized_decl => |a_info| { - const b_info = b.memoized_decl; - return a_info.val == b_info.val; - }, - .memoized_call => |a_info| { const b_info = b.memoized_call; return a_info.func == b_info.func and @@ -1197,9 +1183,7 @@ pub const Key = union(enum) { .generic_poison => .generic_poison_type, }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, }; } }; @@ -1481,7 +1465,6 @@ pub const Index = enum(u32) { }, repeated: struct { data: *Repeated }, - memoized_decl: struct { data: *Key.MemoizedDecl }, memoized_call: struct { const @"data.args_len" = opaque {}; data: *MemoizedCall, @@ -1989,9 +1972,6 @@ pub const Tag = enum(u8) { /// data is extra index to `Repeated`. repeated, - /// A memoized declaration value. - /// data is extra index to `Key.MemoizedDecl` - memoized_decl, /// A memoized comptime function call result. 
/// data is extra index to `MemoizedCall` memoized_call, @@ -2004,7 +1984,6 @@ pub const Tag = enum(u8) { const ExternFunc = Key.ExternFunc; const Func = Key.Func; const Union = Key.Union; - const MemoizedDecl = Key.MemoizedDecl; const TypePointer = Key.PtrType; fn Payload(comptime tag: Tag) type { @@ -2082,7 +2061,6 @@ pub const Tag = enum(u8) { .bytes => Bytes, .aggregate => Aggregate, .repeated => Repeated, - .memoized_decl => MemoizedDecl, .memoized_call => MemoizedCall, }; } @@ -3000,7 +2978,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, - .memoized_decl => .{ .memoized_decl = ip.extraData(Key.MemoizedDecl, data) }, .memoized_call => { const extra = ip.extraDataTrail(MemoizedCall, data); return .{ .memoized_call = .{ @@ -3995,14 +3972,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, - .memoized_decl => |memoized_decl| { - assert(memoized_decl.val != .none); - ip.items.appendAssumeCapacity(.{ - .tag = .memoized_decl, - .data = try ip.addExtra(gpa, memoized_decl), - }); - }, - .memoized_call => |memoized_call| { for (memoized_call.arg_values) |arg| assert(arg != .none); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + @@ -5005,7 +4974,6 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .only_possible_value => 0, .union_value => @sizeOf(Key.Union), - .memoized_decl => @sizeOf(Key.MemoizedDecl), .memoized_call => b: { const info = ip.extraData(MemoizedCall, data); break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); @@ -5383,7 +5351,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .float_comptime_float => .comptime_float_type, - .memoized_decl => unreachable, .memoized_call => unreachable, }, @@ -5624,7 +5591,6 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .aggregate, .repeated, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, diff --git a/src/Module.zig b/src/Module.zig index 862025d8f907..cf0d222a2ec8 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -88,6 +88,9 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, +/// This is currently only used for string literals. +memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, + /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. 
@@ -561,6 +564,7 @@ pub const Decl = struct { } mod.destroyFunc(func); } + _ = mod.memoized_decls.remove(decl.val.ip_index); if (decl.value_arena) |value_arena| { value_arena.deinit(gpa); decl.value_arena = null; @@ -3285,6 +3289,7 @@ pub fn deinit(mod: *Module) void { mod.namespaces_free_list.deinit(gpa); mod.allocated_namespaces.deinit(gpa); + mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); } diff --git a/src/Sema.zig b/src/Sema.zig index 2dba3cac27c7..be531af60fda 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5183,33 +5183,26 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref { const mod = sema.mod; - const memoized_decl_index = memoized: { - const ty = try mod.arrayType(.{ - .len = bytes.len, - .child = .u8_type, - .sentinel = .zero_u8, + const gpa = sema.gpa; + const ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = bytes }, + } }); + const gop = try mod.memoized_decls.getOrPut(gpa, val); + if (!gop.found_existing) { + const new_decl_index = try mod.createAnonymousDecl(block, .{ + .ty = ty, + .val = val.toValue(), }); - const val = try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .bytes = bytes }, - } }); - - _ = try sema.typeHasRuntimeBits(ty); - const new_decl_index = try mod.createAnonymousDecl(block, .{ .ty = ty, .val = val.toValue() }); - errdefer mod.abortAnonDecl(new_decl_index); - - const memoized_index = try mod.intern(.{ .memoized_decl = .{ - .val = val, - .decl = new_decl_index, - } }); - const memoized_decl_index = mod.intern_pool.indexToKey(memoized_index).memoized_decl.decl; - if (memoized_decl_index != new_decl_index) - mod.abortAnonDecl(new_decl_index) - else - try mod.finalizeAnonDecl(new_decl_index); - break :memoized memoized_decl_index; - }; - return sema.analyzeDeclRef(memoized_decl_index); + gop.value_ptr.* = new_decl_index; + try mod.finalizeAnonDecl(new_decl_index); + } + return sema.analyzeDeclRef(gop.value_ptr.*); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -32156,7 +32149,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -33666,7 +33658,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -34155,7 +34146,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 81d25ed98a1e..1ff3ce9415f5 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -279,9 +279,7 @@ pub fn print( } else try writer.writeAll("..."); return writer.writeAll(" }"); }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, }, }; } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index e92bd8f6769c..9403223f30ed 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3255,9 +3255,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => unreachable, }, .un => return func.fail("Wasm TODO: LowerConstant for {}", 
.{ty.fmt(mod)}), - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, } } diff --git a/src/codegen.zig b/src/codegen.zig index 1470b94f1b86..a4c88d1258a0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -610,9 +610,7 @@ pub fn generateSymbol( } } }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, } return .ok; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index eea6e1489686..4b325122cad6 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -925,7 +925,6 @@ pub const DeclGen = struct { .error_set_type, .inferred_error_set_type, // memoization, not values - .memoized_decl, .memoized_call, => unreachable, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 91dcbe11a5b2..5da91e557301 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3796,9 +3796,7 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields_len); } }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, } } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index c7bea80eb69f..85caec94902f 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -831,9 +831,7 @@ pub const DeclGen = struct { try self.addUndef(layout.padding); }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, } } }; diff --git a/src/type.zig b/src/type.zig index bbc2a2ce604c..61c9377b1dad 100644 --- a/src/type.zig +++ b/src/type.zig @@ -446,7 +446,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, } @@ -663,7 +662,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -773,7 +771,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }; @@ -1108,7 +1105,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -1526,7 +1522,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -1761,7 +1756,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, } @@ -2315,7 +2309,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -2666,7 +2659,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -2812,7 +2804,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, diff --git a/src/value.zig b/src/value.zig index db37d8e9e76f..aba0af176e16 100644 --- a/src/value.zig +++ b/src/value.zig @@ -478,9 +478,7 @@ pub const Value = struct { .val = un.val.toValue(), }), - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, }; } From c82a04d35f529c7dc8301059a0fe0b204f111145 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 31 May 2023 17:38:56 -0700 Subject: [PATCH 153/205] InternPool: debug dump all the data --- src/InternPool.zig | 118 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 105 insertions(+), 13 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 59ff9e405b47..edd3e7f1c223 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ 
-1453,7 +1453,7 @@ pub const Index = enum(u32) { float_comptime_float: struct { data: *Float128 }, variable: struct { data: *Tag.Variable }, extern_func: struct { data: *Key.ExternFunc }, - func: struct { data: *Key.Func }, + func: struct { data: *Tag.Func }, only_possible_value: DataIsIndex, union_value: struct { data: *Key.Union }, bytes: struct { data: *Bytes }, @@ -1949,7 +1949,7 @@ pub const Tag = enum(u8) { /// data is extra index to Key.ExternFunc. extern_func, /// A regular function. - /// data is extra index to Key.Func. + /// data is extra index to Func. func, /// This represents the only possible value for *some* types which have /// only one possible value. Not all only-possible-values are encoded this way; @@ -2893,8 +2893,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .is_weak_linkage = extra.flags.is_weak_linkage, } }; }, - .extern_func => .{ .extern_func = ip.extraData(Key.ExternFunc, data) }, - .func => .{ .func = ip.extraData(Key.Func, data) }, + .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, + .func => .{ .func = ip.extraData(Tag.Func, data) }, .only_possible_value => { const ty = @intToEnum(Index, data); const ty_item = ip.items.get(@enumToInt(ty)); @@ -3349,12 +3349,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{ .tag = .extern_func, - .data = try ip.addExtra(gpa, extern_func), + .data = try ip.addExtra(gpa, @as(Tag.ExternFunc, extern_func)), }), .func => |func| ip.items.appendAssumeCapacity(.{ .tag = .func, - .data = try ip.addExtra(gpa, func), + .data = try ip.addExtra(gpa, @as(Tag.Func, func)), }), .ptr => |ptr| { @@ -4715,7 +4715,7 @@ pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex { const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .func) return .none; const datas = ip.items.items(.data); - return ip.extraData(Key.Func, datas[@enumToInt(val)]).index.toOptional(); + return ip.extraData(Tag.Func, datas[@enumToInt(val)]).index.toOptional(); } pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { @@ -4807,10 +4807,11 @@ pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { } pub fn dump(ip: *const InternPool) void { - dumpFallible(ip, std.heap.page_allocator) catch return; + dumpStatsFallible(ip, std.heap.page_allocator) catch return; + dumpAllFallible(ip) catch return; } -fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { +fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { const items_size = (1 + 4) * ip.items.len; const extra_size = 4 * ip.extra.items.len; const limbs_size = 8 * ip.limbs.items.len; @@ -4969,8 +4970,8 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .float_c_longdouble_f128 => @sizeOf(Float128), .float_comptime_float => @sizeOf(Float128), .variable => @sizeOf(Tag.Variable) + @sizeOf(Module.Decl), - .extern_func => @sizeOf(Key.ExternFunc) + @sizeOf(Module.Decl), - .func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), + .extern_func => @sizeOf(Tag.ExternFunc) + @sizeOf(Module.Decl), + .func => @sizeOf(Tag.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), .only_possible_value => 0, .union_value => @sizeOf(Key.Union), @@ -4989,8 +4990,8 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { } }; counts.sort(SortContext{ .map = &counts }); - const len = @min(25, 
counts.count()); - std.debug.print(" top 25 tags:\n", .{}); + const len = @min(50, counts.count()); + std.debug.print(" top 50 tags:\n", .{}); for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| { std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ @tagName(tag), stats.count, stats.bytes, @@ -4998,6 +4999,97 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { } } +fn dumpAllFallible(ip: *const InternPool) anyerror!void { + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + var bw = std.io.bufferedWriter(std.io.getStdErr().writer()); + const w = bw.writer(); + for (tags, datas, 0..) |tag, data, i| { + try w.print("${d} = {s}(", .{ i, @tagName(tag) }); + switch (tag) { + .simple_type => try w.print("{s}", .{@tagName(@intToEnum(SimpleType, data))}), + .simple_value => try w.print("{s}", .{@tagName(@intToEnum(SimpleValue, data))}), + + .type_int_signed, + .type_int_unsigned, + .type_array_small, + .type_array_big, + .type_vector, + .type_pointer, + .type_optional, + .type_anyframe, + .type_error_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_explicit, + .type_enum_nonexhaustive, + .type_enum_auto, + .type_opaque, + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + .type_function, + .undef, + .runtime_value, + .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .int_u8, + .int_u16, + .int_u32, + .int_i32, + .int_usize, + .int_comptime_int_u32, + .int_comptime_int_i32, + .int_small, + .int_positive, + .int_negative, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .error_union_payload, + .enum_literal, + .enum_tag, + .bytes, + .aggregate, + .repeated, + .float_f16, + .float_f32, + .float_f64, + .float_f80, + .float_f128, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + .float_comptime_float, + .variable, + .extern_func, + .func, + .union_value, + .memoized_call, + => try w.print("{d}", .{data}), + + .opt_null, + .type_slice, + .only_possible_value, + => try w.print("${d}", .{data}), + } + try w.writeAll(")\n"); + } + try bw.flush(); +} + pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct { return ip.allocated_structs.at(@enumToInt(index)); } From 08ae212772c38d6149464274edc07282e5418570 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 31 May 2023 20:24:12 -0400 Subject: [PATCH 154/205] InternPool: fix key for empty array with sentinel --- src/InternPool.zig | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index edd3e7f1c223..d593ad1e173f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2899,7 +2899,17 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const ty = @intToEnum(Index, data); const ty_item = ip.items.get(@enumToInt(ty)); return switch (ty_item.tag) { - .type_array_big, .type_array_small, .type_vector => .{ .aggregate = .{ + .type_array_big => { + const sentinel = @ptrCast( + *const [1]Index, + &ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?], + ); + return .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = sentinel[0..@boolToInt(sentinel[0] != .none)] }, + } }; + }, + .type_array_small, .type_vector => .{ .aggregate = .{ .ty = ty, .storage = .{ .elems = &.{} }, } }, @@ -4799,11 +4809,9 @@ pub 
fn isAggregateType(ip: *const InternPool, ty: Index) bool {
 /// This is only legal because the initializer is not part of the hash.
 pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void {
-    assert(ip.items.items(.tag)[@enumToInt(index)] == .variable);
-    const field_index = inline for (@typeInfo(Tag.Variable).Struct.fields, 0..) |field, field_index| {
-        if (comptime std.mem.eql(u8, field.name, "init")) break field_index;
-    } else unreachable;
-    ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index);
+    const item = ip.items.get(@enumToInt(index));
+    assert(item.tag == .variable);
+    ip.extra.items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?] = @enumToInt(init_index);
 }

From 828756ceebaed09a90da81a5fc9a492443e2e8bb Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Wed, 31 May 2023 23:46:39 -0400
Subject: [PATCH 155/205] InternPool: fix element pointer type computations

---
 src/Sema.zig         |  8 ++++++--
 src/codegen/llvm.zig |  3 ++-
 src/value.zig        | 11 +++++------
 3 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index be531af60fda..0cc657ba017e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -28508,7 +28508,11 @@ fn beginComptimePtrLoad(
         .int => return error.RuntimeLoad,
         .eu_payload, .opt_payload => |container_ptr| blk: {
             const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod);
-            const payload_ty = ptr.ty.toType().childType(mod);
+            const payload_ty = switch (ptr.addr) {
+                .eu_payload => container_ty.errorUnionPayload(mod),
+                .opt_payload => container_ty.optionalChild(mod),
+                else => unreachable,
+            };
             var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty);
             // eu_payload and opt_payload never have a well-defined layout
@@ -28554,7 +28558,7 @@ fn beginComptimePtrLoad(
             };
         },
         .elem => |elem_ptr| blk: {
-            const elem_ty = ptr.ty.toType().elemType2(mod);
+            const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
             var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null);
             // This code assumes that elem_ptrs have been "flattened" in order for direct dereference
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 5da91e557301..9d96ea457f9d 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -3915,7 +3915,8 @@ pub const DeclGen = struct {
                 const indices: [1]*llvm.Value = .{
                     llvm_usize.constInt(elem_ptr.index, .False),
                 };
-                const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().elemType2(mod));
+                const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod);
+                const elem_llvm_ty = try dg.lowerType(elem_ty);
                 return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
             },
             .field => |field_ptr| {
diff --git a/src/value.zig b/src/value.zig
index aba0af176e16..c7a2a5445884 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -625,15 +625,14 @@ pub const Value = struct {
                 .int => |int| int.toValue().getUnsignedIntAdvanced(mod, opt_sema),
                 .elem => |elem| {
                     const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
-                    const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod);
-                    return base_addr + elem.index * elem_size;
+                    const elem_ty = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod);
+                    return base_addr + elem.index * elem_ty.abiSize(mod);
                 },
                 .field => |field| {
-                    const struct_ty = ptr.ty.toType().childType(mod);
-                    if (opt_sema) |sema| try
sema.resolveTypeLayout(struct_ty); const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; - const field_offset = ptr.ty.toType().childType(mod).structFieldOffset(field.index, mod); - return base_addr + field_offset; + const struct_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); + if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); + return base_addr + struct_ty.structFieldOffset(field.index, mod); }, else => null, }, From 123cfab98481554946208bd20a42ccc0c94681ef Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 01:25:01 -0400 Subject: [PATCH 156/205] codegen: fix doubled global sentinels --- src/codegen.zig | 46 ++++++++++++++++++---------------------------- 1 file changed, 18 insertions(+), 28 deletions(-) diff --git a/src/codegen.zig b/src/codegen.zig index a4c88d1258a0..b0febb5ea72a 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -387,36 +387,26 @@ pub fn generateSymbol( } }, .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.toIntern())) { - .array_type => |array_type| { - switch (aggregate.storage) { - .bytes => |bytes| try code.appendSlice(bytes), - .elems, .repeated_elem => { - var index: u64 = 0; - while (index < array_type.len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = array_type.child.toType(), - .val = switch (aggregate.storage) { - .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], - .repeated_elem => |elem| elem, - }.toValue(), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - } + .array_type => |array_type| switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => { + var index: u64 = 0; + var len_including_sentinel = + array_type.len + @boolToInt(array_type.sentinel != .none); + while (index < len_including_sentinel) : (index += 1) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, } - }, - } - - if (array_type.sentinel != .none) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = array_type.child.toType(), - .val = array_type.sentinel.toValue(), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, } - } + }, }, .vector_type => |vector_type| { switch (aggregate.storage) { From a3b3ac0ea482fc76684ee7d0a9b98306ebbd942c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 01:44:08 -0400 Subject: [PATCH 157/205] llvm: fix lowering of lazy values These really should not be making it to the backend, but that's a problem for another time. 
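As a sketch of how such a value can arise from user code (illustrative only; `S` is not taken from this change): `@alignOf` of a type whose layout has not yet been resolved may be interned as a lazy integer, and casting that integer to a pointer sends it down this lowering path.

    const S = extern struct { x: u64, y: u8 };

    // `@alignOf(S)` may be interned as a lazy integer until the layout of
    // S is resolved; using it as a pointer constant means lowerIntAsPtr
    // must resolve the value (via toBigInt) rather than assume a plain
    // `.int` key.
    const p = @intToPtr(*anyopaque, @alignOf(S));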
--- src/codegen/llvm.zig | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 9d96ea457f9d..a727b23fccca 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3399,7 +3399,7 @@ pub const DeclGen = struct { const llvm_ptr_val = switch (ptr.addr) { .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), - .int => |int| try dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int)), + .int => |int| try dg.lowerIntAsPtr(int.toValue()), .eu_payload, .opt_payload, .elem, @@ -3800,15 +3800,15 @@ pub const DeclGen = struct { } } - fn lowerIntAsPtr(dg: *DeclGen, int_key: InternPool.Key) Error!*llvm.Value { - switch (int_key) { + fn lowerIntAsPtr(dg: *DeclGen, val: Value) Error!*llvm.Value { + switch (dg.module.intern_pool.indexToKey(val.toIntern())) { .undef => { const llvm_usize = try dg.lowerType(Type.usize); return llvm_usize.getUndef(); }, - .int => |int| { + .int => { var bigint_space: Value.BigIntSpace = undefined; - const bigint = int.storage.toBigInt(&bigint_space); + const bigint = val.toBigInt(&bigint_space, dg.module); const llvm_int = lowerBigInt(dg, Type.usize, bigint); return llvm_int.constIntToPtr(dg.context.pointerType(0)); }, @@ -3861,11 +3861,11 @@ pub const DeclGen = struct { const mod = dg.module; const target = mod.getTarget(); return switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { - .int => |int| dg.lowerIntAsPtr(.{ .int = int }), + .int => dg.lowerIntAsPtr(ptr_val), .ptr => |ptr| switch (ptr.addr) { .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), - .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int)), + .int => |int| dg.lowerIntAsPtr(int.toValue()), .eu_payload => |eu_ptr| { const parent_llvm_ptr = try dg.lowerParentPtr(eu_ptr.toValue(), true); From 0777e98bfe89eadeaa0e34a2ad07cf61cdf5b26a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 01:44:39 -0400 Subject: [PATCH 158/205] Sema: disable repeated aggregate storage use with mismatching sentinel The InternPool implementation was not written to support this, but that could be changed and this check removed in the future. --- src/Sema.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Sema.zig b/src/Sema.zig index 0cc657ba017e..a0d7c483d985 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -13094,7 +13094,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const val = v: { // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. - if (lhs_len == 1) { + if (lhs_len == 1 and lhs_info.sentinel == null) { const elem_val = try lhs_sub_val.elemValue(mod, 0); break :v try mod.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), From cab79b0877e9e7239045dc55d3b2bcff190016cb Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 01:48:42 -0400 Subject: [PATCH 159/205] lib: add const to avoid regression Not sure if this was meant to be legal or not, but either way this code should have been using const anyway. 
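A sketch of the usage the const-qualified element type keeps working (illustrative; the buffer below is not part of the change): string literals and other const data can populate argv/envp slots directly, with no mutable bytes implied.

    // A string literal is a `*const [N:0]u8`: it coerces to
    // `?[*:0]const u8`, but not to the mutable `?[*:0]u8`.
    var argv_buf: [3]?[*:0]const u8 = .{ "echo", "hello", null };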
--- lib/std/child_process.zig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 7cf0f4681a6d..e051ea532ebb 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -530,7 +530,7 @@ pub const ChildProcess = struct { // can fail between fork() and execve(). // Therefore, we do all the allocation for the execve() before the fork(). // This means we must do the null-termination of argv and env vars here. - const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null); + const argv_buf = try arena.allocSentinel(?[*:0]const u8, self.argv.len, null); for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const envp = m: { @@ -542,7 +542,7 @@ pub const ChildProcess = struct { } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr); + break :m @ptrCast([*:null]?[*:0]const u8, os.environ.ptr); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); @@ -1425,9 +1425,9 @@ pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const EnvMap) ! return try allocator.realloc(result, i); } -pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]u8 { +pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]const u8 { const envp_count = env_map.count(); - const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); + const envp_buf = try arena.allocSentinel(?[*:0]const u8, envp_count, null); { var it = env_map.iterator(); var i: usize = 0; From 9b48fc2833be409902f4d3256d2d921f4f924e0d Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 1 Jun 2023 08:10:45 +0100 Subject: [PATCH 160/205] Allocate capture scopes in gpa instead of Decl.value_arena This eliminates the last major use of value_arena, in preparation to remove it. --- src/Module.zig | 76 ++++++++++++++++++++++++----------------- src/Sema.zig | 91 ++++++++++++++++++++++++++++++-------------------- 2 files changed, 101 insertions(+), 66 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index cf0d222a2ec8..06e8c53eb778 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -277,61 +277,75 @@ pub const Export = struct { }; pub const CaptureScope = struct { + refs: u32, parent: ?*CaptureScope, /// Values from this decl's evaluation that will be closed over in - /// child decls. Values stored in the value_arena of the linked decl. - /// During sema, this map is backed by the gpa. Once sema completes, - /// it is reallocated using the value_arena. - captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, TypedValue) = .{}, + /// child decls. This map is backed by the gpa, and deinited when + /// the refcount reaches 0. 
+ captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Capture) = .{}, - pub fn failed(noalias self: *const @This()) bool { + pub const Capture = union(enum) { + comptime_val: InternPool.Index, // index of value + runtime_val: InternPool.Index, // index of type + }; + + pub fn failed(noalias self: *const CaptureScope) bool { return self.captures.available == 0 and self.captures.size == std.math.maxInt(u32); } - pub fn fail(noalias self: *@This()) void { + pub fn fail(noalias self: *CaptureScope, gpa: Allocator) void { + self.captures.deinit(gpa); self.captures.available = 0; self.captures.size = std.math.maxInt(u32); } + + pub fn incRef(self: *CaptureScope) void { + self.refs += 1; + } + + pub fn decRef(self: *CaptureScope, gpa: Allocator) void { + self.refs -= 1; + if (self.refs > 0) return; + if (self.parent) |p| p.decRef(gpa); + if (!self.failed()) { + self.captures.deinit(gpa); + } + } }; pub const WipCaptureScope = struct { scope: *CaptureScope, finalized: bool, gpa: Allocator, - perm_arena: Allocator, - pub fn init(gpa: Allocator, perm_arena: Allocator, parent: ?*CaptureScope) !@This() { - const scope = try perm_arena.create(CaptureScope); - scope.* = .{ .parent = parent }; - return @This(){ + pub fn init(gpa: Allocator, parent: ?*CaptureScope) !WipCaptureScope { + const scope = try gpa.create(CaptureScope); + if (parent) |p| p.incRef(); + scope.* = .{ .refs = 1, .parent = parent }; + return .{ .scope = scope, .finalized = false, .gpa = gpa, - .perm_arena = perm_arena, }; } - pub fn finalize(noalias self: *@This()) !void { - assert(!self.finalized); - // use a temp to avoid unintentional aliasing due to RLS - const tmp = try self.scope.captures.clone(self.perm_arena); - self.scope.captures.deinit(self.gpa); - self.scope.captures = tmp; + pub fn finalize(noalias self: *WipCaptureScope) !void { self.finalized = true; } - pub fn reset(noalias self: *@This(), parent: ?*CaptureScope) !void { - if (!self.finalized) try self.finalize(); - self.scope = try self.perm_arena.create(CaptureScope); - self.scope.* = .{ .parent = parent }; - self.finalized = false; + pub fn reset(noalias self: *WipCaptureScope, parent: ?*CaptureScope) !void { + self.scope.decRef(self.gpa); + self.scope = try self.gpa.create(CaptureScope); + if (parent) |p| p.incRef(); + self.scope.* = .{ .refs = 1, .parent = parent }; } - pub fn deinit(noalias self: *@This()) void { - if (!self.finalized) { - self.scope.captures.deinit(self.gpa); - self.scope.fail(); + pub fn deinit(noalias self: *WipCaptureScope) void { + if (self.finalized) { + self.scope.decRef(self.gpa); + } else { + self.scope.fail(self.gpa); } self.* = undefined; } @@ -3311,6 +3325,7 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { mod.destroyNamespace(i); } } + if (decl.src_scope) |scope| scope.decRef(gpa); decl.clearValues(mod); decl.dependants.deinit(gpa); decl.dependencies.deinit(gpa); @@ -4425,7 +4440,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null); + var wip_captures = try WipCaptureScope.init(gpa, null); defer wip_captures.deinit(); if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| { @@ -4538,7 +4553,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { } log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer 
wip_captures.deinit(); var block_scope: Sema.Block = .{ @@ -5499,7 +5514,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); sema.air_extra.items.len += reserved_count; - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var inner_block: Sema.Block = .{ @@ -5771,6 +5786,7 @@ pub fn allocateNewDecl( }; }; + if (src_scope) |scope| scope.incRef(); decl_and_index.new_decl.* = .{ .name = undefined, .src_namespace = namespace, diff --git a/src/Sema.zig b/src/Sema.zig index a0d7c483d985..cecf59f2c362 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -889,17 +889,18 @@ fn analyzeBodyInner( try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body); + // Most of the time, we don't need to construct a new capture scope for a + // block. However, successive iterations of comptime loops can capture + // different values for the same Zir.Inst.Index, so in those cases, we will + // have to create nested capture scopes; see the `.repeat` case below. const parent_capture_scope = block.wip_capture_scope; - - var wip_captures = WipCaptureScope{ - .finalized = true, + parent_capture_scope.incRef(); + var wip_captures: WipCaptureScope = .{ .scope = parent_capture_scope, - .perm_arena = sema.perm_arena, .gpa = sema.gpa, + .finalized = true, // don't finalize the parent scope }; - defer if (wip_captures.scope != parent_capture_scope) { - wip_captures.deinit(); - }; + defer wip_captures.deinit(); const mod = sema.mod; const map = &sema.inst_map; @@ -1452,6 +1453,11 @@ fn analyzeBodyInner( const src = LazySrcLoc.nodeOffset(datas[inst].node); try sema.emitBackwardBranch(block, src); if (wip_captures.scope.captures.count() != orig_captures) { + // We need to construct new capture scopes for the next loop iteration so it + // can capture values without clobbering the earlier iteration's captures. + // At first, we reused the parent capture scope as an optimization, but for + // successive scopes we have to create new ones as children of the parent + // scope. try wip_captures.reset(parent_capture_scope); block.wip_capture_scope = wip_captures.scope; orig_captures = 0; @@ -1467,6 +1473,11 @@ fn analyzeBodyInner( const src = LazySrcLoc.nodeOffset(datas[inst].node); try sema.emitBackwardBranch(block, src); if (wip_captures.scope.captures.count() != orig_captures) { + // We need to construct new capture scopes for the next loop iteration so it + // can capture values without clobbering the earlier iteration's captures. + // At first, we reused the parent capture scope as an optimization, but for + // successive scopes we have to create new ones as children of the parent + // scope. 
try wip_captures.reset(parent_capture_scope); block.wip_capture_scope = wip_captures.scope; orig_captures = 0; @@ -1749,6 +1760,8 @@ fn analyzeBodyInner( if (noreturn_inst) |some| try block.instructions.append(sema.gpa, some); if (!wip_captures.finalized) { + // We've updated the capture scope due to a `repeat` instruction where + // the body had a capture; finalize our child scope and reset try wip_captures.finalize(); block.wip_capture_scope = parent_capture_scope; } @@ -3007,7 +3020,7 @@ fn zirEnumDecl( defer sema.func = prev_func; defer sema.func_index = prev_func_index; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope); defer wip_captures.deinit(); var enum_block: Block = .{ @@ -6888,7 +6901,7 @@ fn analyzeCall( sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index; defer sema.error_return_trace_index_on_fn_entry = parent_err_ret_index; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, fn_owner_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, fn_owner_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ @@ -7751,7 +7764,7 @@ fn resolveGenericInstantiationType( }; defer child_sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ @@ -11151,7 +11164,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); + var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); @@ -11396,7 +11409,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var cond_body = try case_block.instructions.toOwnedSlice(gpa); defer gpa.free(cond_body); - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); + var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); @@ -11577,7 +11590,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }), }; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); + var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); @@ -15720,13 +15733,15 @@ fn zirClosureCapture( // ...in which case the closure_capture instruction has access to a runtime // value only. In such case we preserve the type and use a dummy runtime value. 
const operand = try sema.resolveInst(inst_data.operand); - const val = (try sema.resolveMaybeUndefValAllowVariables(operand)) orelse - Value.@"unreachable"; - - try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{ - .ty = sema.typeOf(operand), - .val = try val.copy(sema.perm_arena), - }); + const ty = sema.typeOf(operand); + const capture: CaptureScope.Capture = blk: { + if (try sema.resolveMaybeUndefValAllowVariables(operand)) |val| { + const ip_index = try val.intern(ty, sema.mod); + break :blk .{ .comptime_val = ip_index }; + } + break :blk .{ .runtime_val = ty.toIntern() }; + }; + try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, capture); } fn zirClosureGet( @@ -15740,7 +15755,7 @@ fn zirClosureGet( var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?; // Note: The target closure must be in this scope list. // If it's not here, the zir is invalid, or the list is broken. - const tv = while (true) { + const capture = while (true) { // Note: We don't need to add a dependency here, because // decls always depend on their lexical parents. @@ -15753,13 +15768,13 @@ fn zirClosureGet( } return error.AnalysisFail; } - if (scope.captures.getPtr(inst_data.inst)) |tv| { - break tv; + if (scope.captures.get(inst_data.inst)) |capture| { + break capture; } scope = scope.parent.?; }; - if (tv.val.toIntern() == .unreachable_value and !block.is_typeof and sema.func_index == .none) { + if (capture == .runtime_val and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); @@ -15787,7 +15802,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.toIntern() == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { + if (capture == .runtime_val and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); @@ -15817,13 +15832,17 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.toIntern() == .unreachable_value) { - assert(block.is_typeof); - // We need a dummy runtime instruction with the correct type. - return block.addTy(.alloc, tv.ty); + switch (capture) { + .runtime_val => |ty_ip_index| { + assert(block.is_typeof); + // We need a dummy runtime instruction with the correct type. 
+ return block.addTy(.alloc, ty_ip_index.toType()); + }, + .comptime_val => |val_ip_index| { + const ty = mod.intern_pool.typeOf(val_ip_index).toType(); + return sema.addConstant(ty, val_ip_index.toValue()); + }, } - - return sema.addConstant(tv.ty, tv.val); } fn zirRetAddr( @@ -31838,7 +31857,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ @@ -32591,7 +32610,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ @@ -32933,7 +32952,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ @@ -33360,7 +33379,7 @@ fn generateUnionTagTypeSimple( } fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - var wip_captures = try WipCaptureScope.init(sema.gpa, sema.perm_arena, sema.owner_decl.src_scope); + var wip_captures = try WipCaptureScope.init(sema.gpa, sema.owner_decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ @@ -33405,7 +33424,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { const ty_inst = try sema.getBuiltin(name); - var wip_captures = try WipCaptureScope.init(sema.gpa, sema.perm_arena, sema.owner_decl.src_scope); + var wip_captures = try WipCaptureScope.init(sema.gpa, sema.owner_decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ From 8299ddfe4ff506b45cfd58b2a6eb048d8be05b9e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 19:48:36 -0400 Subject: [PATCH 161/205] InternPool: fix more key lifetime issues Reminder to look into deleting `get` and make keys less pointery and more long lived. --- src/Sema.zig | 2 +- src/codegen/llvm.zig | 11 ++++++----- src/value.zig | 3 ++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index cecf59f2c362..385ea7e97584 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -16452,7 +16452,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); for (enum_field_vals, 0..) 
|*field_val, i| { - const name_ip = enum_type.names[i]; + const name_ip = mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names[i]; const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index a727b23fccca..be6ca714a613 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2331,14 +2331,15 @@ pub const Object = struct { try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } - for (mod.typeToFunc(ty).?.param_types) |param_ty| { - if (!param_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + for (0..mod.typeToFunc(ty).?.param_types.len) |i| { + const param_ty = mod.typeToFunc(ty).?.param_types[i].toType(); + if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - if (isByRef(param_ty.toType(), mod)) { - const ptr_ty = try mod.singleMutPtrType(param_ty.toType()); + if (isByRef(param_ty, mod)) { + const ptr_ty = try mod.singleMutPtrType(param_ty); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } else { - try param_di_types.append(try o.lowerDebugType(param_ty.toType(), .full)); + try param_di_types.append(try o.lowerDebugType(param_ty, .full)); } } diff --git a/src/value.zig b/src/value.zig index c7a2a5445884..8b68dd8b556e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2119,7 +2119,8 @@ pub const Value = struct { if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true; } else false, }, - .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { + .aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| { + const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i]; if (try anyUndef(elem.toValue(), mod)) break true; } else false, else => false, From e2174428e81fc5d84c54ae4833002d943adab38a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 19:49:57 -0400 Subject: [PATCH 162/205] wasm: implement missing case --- src/arch/wasm/CodeGen.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 9403223f30ed..7b1258155c61 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3320,6 +3320,7 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), .int => |int| intStorageAsI32(int.storage, mod), .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), + .err => |err| @bitCast(i32, mod.global_error_set.get(mod.intern_pool.stringToSlice(err.name)).?), else => unreachable, }, } From dc18739a738adc7fd549f9d43db2314275c51c03 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 21:03:33 -0400 Subject: [PATCH 163/205] process: add more missing const --- lib/std/process.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/std/process.zig b/lib/std/process.zig index 80be7051876f..f5972eda1e91 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1131,7 +1131,7 @@ pub fn execve( defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null); + const argv_buf = try arena.allocSentinel(?[*:0]const u8, argv.len, null); for (argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const envp = m: { @@ -1143,7 +1143,7 @@ pub fn execve( } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. 
// TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr); + break :m @ptrCast([*:null]?[*:0]const u8, os.environ.ptr); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process"); From e8bcdca044603fb5ea93fc94028dfd8bdd22fcf3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 21:03:53 -0400 Subject: [PATCH 164/205] Sema: fix in-memory coercion during comptime load --- src/Sema.zig | 44 ++++++++++++++++++++++++++++---------------- src/value.zig | 1 + 2 files changed, 29 insertions(+), 16 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 385ea7e97584..9b36ddb969ed 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -22472,9 +22472,7 @@ fn analyzeMinMax( if (std.debug.runtime_safety) { assert(try sema.intFitsInType(val, refined_ty, null)); } - cur_minmax = try sema.addConstant(refined_ty, (try sema.resolveMaybeUndefVal( - try sema.coerceInMemory(block, val, orig_ty, refined_ty, src), - )).?); + cur_minmax = try sema.coerceInMemory(block, val, orig_ty, refined_ty, src); } break :refined refined_ty; @@ -26659,16 +26657,16 @@ fn coerceExtra( return sema.failWithOwnedErrorMsg(msg); } -fn coerceInMemory( +fn coerceValueInMemory( sema: *Sema, block: *Block, val: Value, src_ty: Type, dst_ty: Type, dst_ty_src: LazySrcLoc, -) CompileError!Air.Inst.Ref { +) CompileError!Value { const mod = sema.mod; - switch (mod.intern_pool.indexToKey(val.toIntern())) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| { const dst_ty_key = mod.intern_pool.indexToKey(dst_ty.toIntern()); const dest_len = try sema.usizeCast( @@ -26688,14 +26686,14 @@ fn coerceInMemory( else => unreachable, }; if (src_ty_child != dst_ty_child) break :direct; - return try sema.addConstant(dst_ty, (try mod.intern(.{ .aggregate = .{ + return (try mod.intern(.{ .aggregate = .{ .ty = dst_ty.toIntern(), .storage = switch (aggregate.storage) { .bytes => |bytes| .{ .bytes = bytes[0..dest_len] }, .elems => |elems| .{ .elems = elems[0..dest_len] }, .repeated_elem => |elem| .{ .repeated_elem = elem }, }, - } })).toValue()); + } })).toValue(); } const dest_elems = try sema.arena.alloc(InternPool.Index, dest_len); for (dest_elems, 0..) 
|*dest_elem, i| { @@ -26712,17 +26710,28 @@ fn coerceInMemory( .repeated_elem => |elem| elem, }, elem_ty); } - return sema.addConstant(dst_ty, (try mod.intern(.{ .aggregate = .{ + return (try mod.intern(.{ .aggregate = .{ .ty = dst_ty.toIntern(), .storage = .{ .elems = dest_elems }, - } })).toValue()); + } })).toValue(); }, - .float => |float| return sema.addConstant(dst_ty, (try mod.intern(.{ .float = .{ + .float => |float| (try mod.intern(.{ .float = .{ .ty = dst_ty.toIntern(), .storage = float.storage, - } })).toValue()), - else => return sema.addConstant(dst_ty, try mod.getCoerced(val, dst_ty)), - } + } })).toValue(), + else => try mod.getCoerced(val, dst_ty), + }; +} + +fn coerceInMemory( + sema: *Sema, + block: *Block, + val: Value, + src_ty: Type, + dst_ty: Type, + dst_ty_src: LazySrcLoc, +) CompileError!Air.Inst.Ref { + return sema.addConstant(dst_ty, try sema.coerceValueInMemory(block, val, src_ty, dst_ty, dst_ty_src)); } const InMemoryCoercionResult = union(enum) { @@ -33935,8 +33944,11 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value if (coerce_in_mem_ok) { // We have a Value that lines up in virtual memory exactly with what we want to load, // and it is in-memory coercible to load_ty. It may be returned without modifications. - // Move mutable decl values to the InternPool and assert other decls are already in the InternPool. - return .{ .val = (if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern()).toValue() }; + // Move mutable decl values to the InternPool and assert other decls are already in + // the InternPool. + const uncoerced_val = if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern(); + const coerced_val = try sema.coerceValueInMemory(block, uncoerced_val.toValue(), tv.ty, load_ty, src); + return .{ .val = coerced_val }; } } diff --git a/src/value.zig b/src/value.zig index 8b68dd8b556e..6a19678d7152 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1842,6 +1842,7 @@ pub const Value = struct { pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { return switch (val.ip_index) { .none => switch (val.tag()) { + .bytes => try mod.intValue(Type.u8, val.castTag(.bytes).?.data[index]), .repeated => val.castTag(.repeated).?.data, .aggregate => val.castTag(.aggregate).?.data[index], .slice => val.castTag(.slice).?.data.ptr.elemValue(mod, index), From e0179640d54f4a61aa7522ac8529d36769fb9c08 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 2 Jun 2023 01:55:16 +0100 Subject: [PATCH 165/205] Sema: intern values of mutable decls after analysis This is necessary with the upcoming removal of Decl.value_arena to prevent UAF of these values. 
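A minimal sketch of the pattern this protects (illustrative; not from the change): comptime-mutable memory is backed by an anonymous Decl, and its final value must be moved into the InternPool so it outlives Sema's temporary arena.

    const table = blk: {
        var buf: [4]u8 = undefined; // comptime-mutable memory, backed by a Decl
        for (&buf, 0..) |*b, i| b.* = @intCast(u8, i * i);
        break :blk buf;
    };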
--- src/Module.zig | 24 ++++++++++++++++++++++++ src/Sema.zig | 51 +++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/src/Module.zig b/src/Module.zig index 06e8c53eb778..d575f89b4186 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4424,6 +4424,9 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { defer sema_arena.deinit(); const sema_arena_allocator = sema_arena.allocator(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, @@ -4437,6 +4440,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); @@ -4445,6 +4449,10 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| { try wip_captures.finalize(); + for (comptime_mutable_decls.items) |decl_index| { + const decl = mod.declPtr(decl_index); + try decl.intern(mod); + } new_decl.analysis = .complete; } else |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -4522,6 +4530,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { defer analysis_arena.deinit(); const analysis_arena_allocator = analysis_arena.allocator(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, @@ -4535,6 +4546,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); @@ -4577,6 +4589,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { const body = zir.extra[extra.end..][0..extra.data.body_len]; const result_ref = (try sema.analyzeBodyBreak(&block_scope, body)).?.operand; try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 }; const section_src: LazySrcLoc = .{ .node_offset_var_decl_section = 0 }; const address_space_src: LazySrcLoc = .{ .node_offset_var_decl_addrspace = 0 }; @@ -5486,6 +5502,9 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + const fn_ty = decl.ty; const fn_ty_info = mod.typeToFunc(fn_ty).?; @@ -5503,6 +5522,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE .owner_func = func, .owner_func_index = func_index.toOptional(), .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); @@ -5642,6 +5662,10 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } // Copy the block into place and mark that as the main block. 
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + diff --git a/src/Sema.zig b/src/Sema.zig index 9b36ddb969ed..620a8a6a28a3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -92,6 +92,14 @@ no_partial_func_ty: bool = false, /// here so the values can be dropped without any cleanup. unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{}, +/// Indices of comptime-mutable decls created by this Sema. These decls' values +/// should be interned after analysis completes, as they may refer to memory in +/// the Sema arena. +/// TODO: this is a workaround for memory bugs triggered by the removal of +/// Decl.value_arena. A better solution needs to be found. Probably this will +/// involve transitioning comptime-mutable memory away from using Decls at all. +comptime_mutable_decls: *std.ArrayList(Decl.Index), + const std = @import("std"); const math = std.math; const mem = std.mem; @@ -2545,6 +2553,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE }, }); try sema.maybeQueueFuncBodyAnalysis(decl_index); + try sema.comptime_mutable_decls.append(decl_index); return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ .ty = ptr_ty.toIntern(), .addr = .{ .mut_decl = .{ @@ -7761,6 +7770,7 @@ fn resolveGenericInstantiationType( .is_generic_instantiation = true, .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, + .comptime_mutable_decls = sema.comptime_mutable_decls, }; defer child_sema.deinit(); @@ -31863,7 +31873,24 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none }; + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + + var sema: Sema = .{ + .mod = mod, + .gpa = gpa, + .arena = analysis_arena.allocator(), + .perm_arena = decl_arena_allocator, + .code = zir, + .owner_decl = decl, + .owner_decl_index = decl_index, + .func = null, + .func_index = .none, + .fn_ret_ty = Type.void, + .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, + }; defer sema.deinit(); var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); @@ -31899,6 +31926,10 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum); struct_obj.backing_int_ty = backing_int_ty; try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } } else { if (fields_bit_sum > std.math.maxInt(u16)) { var sema: Sema = .{ @@ -31914,6 +31945,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none, + .comptime_mutable_decls = undefined, }; defer sema.deinit(); @@ -32603,6 +32635,9 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + 
defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, @@ -32616,6 +32651,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); @@ -32886,6 +32922,10 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } struct_obj.have_field_inits = true; } @@ -32945,6 +32985,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, @@ -32958,6 +33001,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); @@ -32984,6 +33028,10 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); @@ -33821,6 +33869,7 @@ fn analyzeComptimeAlloc( const decl = sema.mod.declPtr(decl_index); decl.@"align" = alignment; + try sema.comptime_mutable_decls.append(decl_index); try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ .ty = ptr_type.toIntern(), From 34dae73005baa3be54e0d9e0725ab31cb0723a06 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Jun 2023 23:45:36 -0700 Subject: [PATCH 166/205] std.hash: auto hash signed ints as bitcasts of unsigned ints --- lib/std/hash/auto_hash.zig | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index 0c88caae7e1e..251ac120f6e0 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -91,15 +91,21 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { // Help the optimizer see that hashing an int is easy by inlining! // TODO Check if the situation is better after #561 is resolved. - .Int => { - if (comptime meta.trait.hasUniqueRepresentation(Key)) { - @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) }); - } else { - // Take only the part containing the key value, the remaining - // bytes are undefined and must not be hashed! 
- const byte_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(Key), 8) catch unreachable; - @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key)[0..byte_size] }); - } + .Int => |int| switch (int.signedness) { + .signed => hash(hasher, @bitCast(@Type(.{ .Int = .{ + .bits = int.bits, + .signedness = .unsigned, + } }), key), strat), + .unsigned => { + if (comptime meta.trait.hasUniqueRepresentation(Key)) { + @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) }); + } else { + // Take only the part containing the key value, the remaining + // bytes are undefined and must not be hashed! + const byte_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(Key), 8) catch unreachable; + @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key)[0..byte_size] }); + } + }, }, .Bool => hash(hasher, @boolToInt(key), strat), From 69b7b910929e84248671377e1743477757e66837 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Jun 2023 23:46:04 -0700 Subject: [PATCH 167/205] compiler: eliminate Decl.value_arena and Sema.perm_arena The main motivation for this commit is eliminating Decl.value_arena. Everything else is dominoes. Decl.name used to be stored in the GPA, now it is stored in InternPool. It ended up being simpler to migrate other strings to be interned as well, such as struct field names, union field names, and a few others. This ended up requiring a big diff, sorry about that. But the changes are pretty nice, we finally start to take advantage of InternPool's existence. global_error_set and error_name_list are simplified. Now it is a single ArrayHashMap(NullTerminatedString, void) and the index is the error tag value. Module.tmp_hack_arena is re-introduced (it was removed in eeff407941560ce8eb5b737b2436dfa93cfd3a0c) in order to deal with comptime_args, optimized_order, and struct and union fields. After structs and unions get moved into InternPool properly, tmp_hack_arena can be deleted again. --- src/Compilation.zig | 21 +- src/InternPool.zig | 33 + src/Module.zig | 332 ++++------ src/Sema.zig | 1204 ++++++++++++++++++---------------- src/TypedValue.zig | 27 +- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 - src/arch/wasm/CodeGen.zig | 26 +- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen.zig | 20 +- src/codegen/c.zig | 72 +- src/codegen/c/type.zig | 28 +- src/codegen/llvm.zig | 122 ++-- src/codegen/spirv.zig | 13 +- src/link.zig | 11 - src/link/C.zig | 9 +- src/link/Coff.zig | 50 +- src/link/Dwarf.zig | 25 +- src/link/Elf.zig | 33 +- src/link/MachO.zig | 29 +- src/link/Plan9.zig | 30 +- src/link/SpirV.zig | 5 +- src/link/Wasm.zig | 45 +- src/print_air.zig | 3 +- src/type.zig | 32 +- src/value.zig | 116 ++-- 26 files changed, 1160 insertions(+), 1132 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 0ee916c44681..64f947c3c338 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1317,7 +1317,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .global_zir_cache = global_zir_cache, .local_zir_cache = local_zir_cache, .emit_h = emit_h, - .error_name_list = .{}, + .tmp_hack_arena = std.heap.ArenaAllocator.init(gpa), }; try module.init(); @@ -2627,7 +2627,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { var it = module.failed_files.iterator(); while (it.next()) |entry| { if (entry.value_ptr.*) |msg| { - try addModuleErrorMsg(&bundle, msg.*); + try addModuleErrorMsg(module, &bundle, msg.*); } else { // Must be ZIR errors. 
Note that this may include AST errors. // addZirErrorMessages asserts that the tree is loaded. @@ -2640,7 +2640,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { var it = module.failed_embed_files.iterator(); while (it.next()) |entry| { const msg = entry.value_ptr.*; - try addModuleErrorMsg(&bundle, msg.*); + try addModuleErrorMsg(module, &bundle, msg.*); } } { @@ -2650,7 +2650,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. if (module.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); + try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*); if (module.cimport_errors.get(entry.key_ptr.*)) |cimport_errors| for (cimport_errors) |c_error| { try bundle.addRootErrorMessage(.{ .msg = try bundle.addString(std.mem.span(c_error.msg)), @@ -2675,12 +2675,12 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. if (module.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); + try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*); } } } for (module.failed_exports.values()) |value| { - try addModuleErrorMsg(&bundle, value.*); + try addModuleErrorMsg(module, &bundle, value.*); } } @@ -2728,7 +2728,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { }; } - try addModuleErrorMsg(&bundle, err_msg); + try addModuleErrorMsg(module, &bundle, err_msg); } } @@ -2784,8 +2784,9 @@ pub const ErrorNoteHashContext = struct { } }; -pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { +pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { const gpa = eb.gpa; + const ip = &mod.intern_pool; const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| { const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); @@ -2811,7 +2812,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) .src_loc = .none, }); break; - } else if (module_reference.decl == null) { + } else if (module_reference.decl == .none) { try ref_traces.append(gpa, .{ .decl_name = 0, .src_loc = .none, @@ -2824,7 +2825,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa); defer gpa.free(rt_file_path); try ref_traces.append(gpa, .{ - .decl_name = try eb.addString(std.mem.sliceTo(module_reference.decl.?, 0)), + .decl_name = try eb.addString(ip.stringToSliceUnwrap(module_reference.decl).?), .src_loc = try eb.addSourceLocation(.{ .src_path = try eb.addString(rt_file_path), .span_start = span.start, diff --git a/src/InternPool.zig b/src/InternPool.zig index d593ad1e173f..ecdd30d1109c 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -124,6 +124,8 @@ pub const String = enum(u32) { /// An index into `string_bytes`. pub const NullTerminatedString = enum(u32) { + /// This is distinct from `none` - it is a valid index that represents empty string. + empty = 0, _, pub fn toString(self: NullTerminatedString) String { @@ -157,6 +159,8 @@ pub const NullTerminatedString = enum(u32) { /// An index into `string_bytes` which might be `none`. 
pub const OptionalNullTerminatedString = enum(u32) { + /// This is distinct from `none` - it is a valid index that represents empty string. + empty = 0, none = std.math.maxInt(u32), _, @@ -2447,6 +2451,9 @@ pub const MemoizedCall = struct { pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.items.len == 0); + // Reserve string index 0 for an empty string. + assert((try ip.getOrPutString(gpa, "")) == .empty); + // So that we can use `catch unreachable` below. try ip.items.ensureUnusedCapacity(gpa, static_keys.len); try ip.map.ensureUnusedCapacity(gpa, static_keys.len); @@ -5222,6 +5229,28 @@ pub fn getOrPutString( return ip.getOrPutTrailingString(gpa, s.len + 1); } +pub fn getOrPutStringFmt( + ip: *InternPool, + gpa: Allocator, + comptime format: []const u8, + args: anytype, +) Allocator.Error!NullTerminatedString { + const start = ip.string_bytes.items.len; + try ip.string_bytes.writer(gpa).print(format, args); + try ip.string_bytes.append(gpa, 0); + return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); +} + +pub fn getOrPutStringOpt( + ip: *InternPool, + gpa: Allocator, + optional_string: ?[]const u8, +) Allocator.Error!OptionalNullTerminatedString { + const s = optional_string orelse return .none; + const interned = try getOrPutString(ip, gpa, s); + return interned.toOptional(); +} + /// Uses the last len bytes of ip.string_bytes as the key. pub fn getOrPutTrailingString( ip: *InternPool, @@ -5273,6 +5302,10 @@ pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedStrin return ip.stringToSlice(s.unwrap() orelse return null); } +pub fn stringEqlSlice(ip: *const InternPool, a: NullTerminatedString, b: []const u8) bool { + return std.mem.eql(u8, stringToSlice(ip, a), b); +} + pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization of static keys is required so that typeOf can be called // on static keys that haven't been added yet during static key initialization. diff --git a/src/Module.zig b/src/Module.zig index d575f89b4186..1e75ab037d6e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -88,6 +88,14 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, +/// To be eliminated in a future commit by moving more data into InternPool. +/// Current uses that must be eliminated: +/// * Struct comptime_args +/// * Struct optimized_order +/// * Union fields +/// This memory lives until the Module is destroyed. +tmp_hack_arena: std.heap.ArenaAllocator, + /// This is currently only used for string literals. memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, @@ -125,13 +133,8 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, []CImportError) = .{}, /// contains Decls that need to be deleted if they end up having no references to them. deletion_set: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, -/// Error tags and their values, tag names are duped with mod.gpa. -/// Corresponds with `error_name_list`. -global_error_set: std.StringHashMapUnmanaged(ErrorInt) = .{}, - -/// ErrorInt -> []const u8 for fast lookups for @intToError at comptime -/// Corresponds with `global_error_set`. -error_name_list: ArrayListUnmanaged([]const u8), +/// Key is the error name, index is the error tag value. Index 0 has a length-0 string. 
+global_error_set: GlobalErrorSet = .{}, /// Incrementing integer used to compare against the corresponding Decl /// field to determine whether a Decl's status applies to an ongoing update, or a @@ -182,6 +185,8 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { src: LazySrcLoc, }) = .{}, +pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + pub const CImportError = struct { offset: u32, line: u32, @@ -248,7 +253,11 @@ pub const GlobalEmitH = struct { pub const ErrorInt = u32; pub const Export = struct { - options: std.builtin.ExportOptions, + name: InternPool.NullTerminatedString, + linkage: std.builtin.GlobalLinkage, + section: InternPool.OptionalNullTerminatedString, + visibility: std.builtin.SymbolVisibility, + src: LazySrcLoc, /// The Decl that performs the export. Note that this is *not* the Decl being exported. owner_decl: Decl.Index, @@ -392,8 +401,7 @@ const ValueArena = struct { }; pub const Decl = struct { - /// Allocated with Module's allocator; outlives the ZIR code. - name: [*:0]const u8, + name: InternPool.NullTerminatedString, /// The most recent Type of the Decl after a successful semantic analysis. /// Populated when `has_tv`. ty: Type, @@ -401,15 +409,11 @@ pub const Decl = struct { /// Populated when `has_tv`. val: Value, /// Populated when `has_tv`. - /// Points to memory inside value_arena. - @"linksection": ?[*:0]const u8, + @"linksection": InternPool.OptionalNullTerminatedString, /// Populated when `has_tv`. @"align": u32, /// Populated when `has_tv`. @"addrspace": std.builtin.AddressSpace, - /// The memory for ty, val, align, linksection, and captures. - /// If this is `null` then there is no memory management needed. - value_arena: ?*ValueArena = null, /// The direct parent namespace of the Decl. /// Reference to externally owned memory. /// In the case of the Decl corresponding to a file, this is @@ -564,13 +568,7 @@ pub const Decl = struct { function_body, }; - pub fn clearName(decl: *Decl, gpa: Allocator) void { - gpa.free(mem.sliceTo(decl.name, 0)); - decl.name = undefined; - } - pub fn clearValues(decl: *Decl, mod: *Module) void { - const gpa = mod.gpa; if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); if (mod.funcPtr(func).comptime_args != null) { @@ -579,19 +577,6 @@ pub const Decl = struct { mod.destroyFunc(func); } _ = mod.memoized_decls.remove(decl.val.ip_index); - if (decl.value_arena) |value_arena| { - value_arena.deinit(gpa); - decl.value_arena = null; - decl.has_tv = false; - decl.owns_tv = false; - } - } - - pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void { - assert(decl.value_arena == null); - const value_arena = try arena.allocator().create(ValueArena); - value_arena.* = .{ .state = arena.state }; - decl.value_arena = value_arena; } /// This name is relative to the containing namespace of the decl. 
@@ -692,7 +677,7 @@ pub const Decl = struct { } pub fn renderFullyQualifiedName(decl: Decl, mod: *Module, writer: anytype) !void { - const unqualified_name = mem.sliceTo(decl.name, 0); + const unqualified_name = mod.intern_pool.stringToSlice(decl.name); if (decl.name_fully_qualified) { return writer.writeAll(unqualified_name); } @@ -700,24 +685,27 @@ pub const Decl = struct { } pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void { - const unqualified_name = mem.sliceTo(decl.name, 0); + const unqualified_name = mod.intern_pool.stringToSlice(decl.name); return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, unqualified_name, writer); } - pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 { - var buffer = std.ArrayList(u8).init(mod.gpa); - defer buffer.deinit(); - try decl.renderFullyQualifiedName(mod, buffer.writer()); + pub fn getFullyQualifiedName(decl: Decl, mod: *Module) !InternPool.NullTerminatedString { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const start = ip.string_bytes.items.len; + try decl.renderFullyQualifiedName(mod, ip.string_bytes.writer(gpa)); // Sanitize the name for nvptx which is more restrictive. + // TODO This should be handled by the backend, not the frontend. Have a + // look at how the C backend does it for inspiration. if (mod.comp.bin_file.options.target.cpu.arch.isNvptx()) { - for (buffer.items) |*byte| switch (byte.*) { + for (ip.string_bytes.items[start..]) |*byte| switch (byte.*) { '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_', else => {}, }; } - return buffer.toOwnedSliceSentinel(0); + return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); } pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue { @@ -804,11 +792,11 @@ pub const Decl = struct { pub fn dump(decl: *Decl) void { const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src); - std.debug.print("{s}:{d}:{d} name={s} status={s}", .{ + std.debug.print("{s}:{d}:{d} name={d} status={s}", .{ decl.scope.sub_file_path, loc.line + 1, loc.column + 1, - mem.sliceTo(decl.name, 0), + @enumToInt(decl.name), @tagName(decl.analysis), }); if (decl.has_tv) { @@ -922,15 +910,15 @@ pub const Struct = struct { } }; - pub const Fields = std.StringArrayHashMapUnmanaged(Field); + pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field); /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl. pub const Field = struct { /// Uses `noreturn` to indicate `anytype`. /// undefined until `status` is >= `have_field_types`. ty: Type, - /// Uses `unreachable_value` to indicate no default. - default_val: Value, + /// Uses `none` to indicate no default. + default_val: InternPool.Index, /// Zero means to use the ABI alignment of the type. abi_align: u32, /// undefined until `status` is `have_layout`. @@ -982,7 +970,7 @@ pub const Struct = struct { /// runtime version of the struct. 
pub const omitted_field = std.math.maxInt(u32); - pub fn getFullyQualifiedName(s: *Struct, mod: *Module) ![:0]u8 { + pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString { return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } @@ -1141,9 +1129,9 @@ pub const Union = struct { } }; - pub const Fields = std.StringArrayHashMapUnmanaged(Field); + pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field); - pub fn getFullyQualifiedName(s: *Union, mod: *Module) ![:0]u8 { + pub fn getFullyQualifiedName(s: *Union, mod: *Module) !InternPool.NullTerminatedString { return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } @@ -1569,15 +1557,15 @@ pub const Fn = struct { pub const DeclAdapter = struct { mod: *Module, - pub fn hash(self: @This(), s: []const u8) u32 { + pub fn hash(self: @This(), s: InternPool.NullTerminatedString) u32 { _ = self; - return @truncate(u32, std.hash.Wyhash.hash(0, s)); + return std.hash.uint32(@enumToInt(s)); } - pub fn eql(self: @This(), a: []const u8, b_decl_index: Decl.Index, b_index: usize) bool { + pub fn eql(self: @This(), a: InternPool.NullTerminatedString, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; const b_decl = self.mod.declPtr(b_decl_index); - return mem.eql(u8, a, mem.sliceTo(b_decl.name, 0)); + return a == b_decl.name; } }; @@ -1628,16 +1616,14 @@ pub const Namespace = struct { pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 { const decl = ctx.module.declPtr(decl_index); - return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceTo(decl.name, 0))); + return std.hash.uint32(@enumToInt(decl.name)); } pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; const a_decl = ctx.module.declPtr(a_decl_index); const b_decl = ctx.module.declPtr(b_decl_index); - const a_name = mem.sliceTo(a_decl.name, 0); - const b_name = mem.sliceTo(b_decl.name, 0); - return mem.eql(u8, a_name, b_name); + return a_decl.name == b_decl.name; } }; @@ -1649,8 +1635,6 @@ pub const Namespace = struct { pub fn destroyDecls(ns: *Namespace, mod: *Module) void { const gpa = mod.gpa; - log.debug("destroyDecls {*}", .{ns}); - var decls = ns.decls; ns.decls = .{}; @@ -1676,8 +1660,6 @@ pub const Namespace = struct { ) !void { const gpa = mod.gpa; - log.debug("deleteAllDecls {*}", .{ns}); - var decls = ns.decls; ns.decls = .{}; @@ -1712,7 +1694,8 @@ pub const Namespace = struct { if (ns.parent.unwrap()) |parent| { const decl_index = ns.getDeclIndex(mod); const decl = mod.declPtr(decl_index); - try mod.namespacePtr(parent).renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); + const decl_name = mod.intern_pool.stringToSlice(decl.name); + try mod.namespacePtr(parent).renderFullyQualifiedName(mod, decl_name, writer); } else { try ns.file_scope.renderFullyQualifiedName(writer); } @@ -1733,7 +1716,8 @@ pub const Namespace = struct { if (ns.parent.unwrap()) |parent| { const decl_index = ns.getDeclIndex(mod); const decl = mod.declPtr(decl_index); - try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); + const decl_name = mod.intern_pool.stringToSlice(decl.name); + try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, decl_name, writer); } else { try ns.file_scope.renderFullyQualifiedDebugName(writer); separator_char = ':'; @@ -1927,11 +1911,11 @@ pub const File = struct { }; } - pub fn fullyQualifiedNameZ(file: File, gpa: Allocator) ![:0]u8 { - var buf = std.ArrayList(u8).init(gpa); - 
defer buf.deinit(); - try file.renderFullyQualifiedName(buf.writer()); - return buf.toOwnedSliceSentinel(0); + pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + const start = ip.string_bytes.items.len; + try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa)); + return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start); } /// Returns the full path to this file relative to its package. @@ -2055,7 +2039,7 @@ pub const ErrorMsg = struct { reference_trace: []Trace = &.{}, pub const Trace = struct { - decl: ?[*:0]const u8, + decl: InternPool.OptionalNullTerminatedString, src_loc: SrcLoc, hidden: u32 = 0, }; @@ -3180,8 +3164,8 @@ pub const CompileError = error{ pub fn init(mod: *Module) !void { const gpa = mod.gpa; - try mod.error_name_list.append(gpa, "(no error)"); try mod.intern_pool.init(gpa); + try mod.global_error_set.put(gpa, .empty, {}); } pub fn deinit(mod: *Module) void { @@ -3282,15 +3266,8 @@ pub fn deinit(mod: *Module) void { } mod.export_owners.deinit(gpa); - { - var it = mod.global_error_set.keyIterator(); - while (it.next()) |key| { - gpa.free(key.*); - } - mod.global_error_set.deinit(gpa); - } + mod.global_error_set.deinit(gpa); - mod.error_name_list.deinit(gpa); mod.test_functions.deinit(gpa); mod.align_stack_fns.deinit(gpa); mod.monomorphed_funcs.deinit(gpa); @@ -3305,13 +3282,13 @@ pub fn deinit(mod: *Module) void { mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); + mod.tmp_hack_arena.deinit(); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { const gpa = mod.gpa; { const decl = mod.declPtr(decl_index); - log.debug("destroy {*} ({s})", .{ decl, decl.name }); _ = mod.test_functions.swapRemove(decl_index); if (decl.deletion_flag) { assert(mod.deletion_set.swapRemove(decl_index)); @@ -3329,7 +3306,6 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { decl.clearValues(mod); decl.dependants.deinit(gpa); decl.dependencies.deinit(gpa); - decl.clearName(gpa); decl.* = undefined; } mod.decls_free_list.append(gpa, decl_index) catch { @@ -3391,11 +3367,7 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { } fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void { - for (export_list.items) |exp| { - gpa.free(exp.options.name); - if (exp.options.section) |s| gpa.free(s); - gpa.destroy(exp); - } + for (export_list.items) |exp| gpa.destroy(exp); export_list.deinit(gpa); } @@ -3814,9 +3786,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { if (decl.zir_decl_index != 0) { const old_zir_decl_index = decl.zir_decl_index; const new_zir_decl_index = extra_map.get(old_zir_decl_index) orelse { - log.debug("updateZirRefs {s}: delete {*} ({s})", .{ - file.sub_file_path, decl, decl.name, - }); try file.deleted_decls.append(gpa, decl_index); continue; }; @@ -3824,14 +3793,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { decl.zir_decl_index = new_zir_decl_index; const new_hash = decl.contentsHashZir(new_zir); if (!std.zig.srcHashEql(old_hash, new_hash)) { - log.debug("updateZirRefs {s}: outdated {*} ({s}) {d} => {d}", .{ - file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, - }); try file.outdated_decls.append(gpa, decl_index); - } else { - log.debug("updateZirRefs {s}: unchanged {*} ({s}) {d} => {d}", .{ - file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, - }); } } @@ -4031,8 +3993,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, 
decl_index: Decl.Index) SemaError!void { .complete => return, .outdated => blk: { - log.debug("re-analyzing {*} ({s})", .{ decl, decl.name }); - // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. try mod.deleteDeclExports(decl_index); @@ -4047,9 +4007,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { const dep = mod.declPtr(dep_index); dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { - log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ - decl, decl.name, dep, dep.name, - }); try mod.markDeclForDeletion(dep_index); } } @@ -4061,7 +4018,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { .unreferenced => false, }; - var decl_prog_node = mod.sema_prog_node.start(mem.sliceTo(decl.name, 0), 0); + var decl_prog_node = mod.sema_prog_node.start(mod.intern_pool.stringToSlice(decl.name), 0); decl_prog_node.activate(); defer decl_prog_node.end(); @@ -4190,14 +4147,11 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void if (no_bin_file and !dump_air and !dump_llvm_ir) return; - log.debug("analyze liveness of {s}", .{decl.name}); var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); defer liveness.deinit(gpa); if (dump_air) { - const fqn = try decl.getFullyQualifiedName(mod); - defer mod.gpa.free(fqn); - + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); std.debug.print("# Begin Function AIR: {s}:\n", .{fqn}); @import("print_air.zig").dump(mod, air, liveness); std.debug.print("# End Function AIR: {s}\n\n", .{fqn}); @@ -4354,9 +4308,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (file.root_decl != .none) return; const gpa = mod.gpa; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type gains an @@ -4394,7 +4345,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { new_namespace.ty = struct_ty.toType(); file.root_decl = new_decl_index.toOptional(); - new_decl.name = try file.fullyQualifiedNameZ(gpa); + new_decl.name = try file.fullyQualifiedName(mod); new_decl.src_line = 0; new_decl.is_pub = true; new_decl.is_exported = false; @@ -4403,7 +4354,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { new_decl.ty = Type.type; new_decl.val = struct_ty.toValue(); new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.owns_tv = true; new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive. @@ -4431,7 +4382,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .mod = mod, .gpa = gpa, .arena = sema_arena_allocator, - .perm_arena = new_decl_arena_allocator, .code = file.zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, @@ -4484,8 +4434,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { } else { new_decl.analysis = .file_failure; } - - try new_decl.finalizeNewArena(&new_decl_arena); } /// Returns `true` if the Decl type changed. 
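Annotation: the `getFullyQualifiedName` and `fullyQualifiedName` rewrites above share one device: render the name directly onto the end of the intern pool's `string_bytes`, then intern the bytes that were just appended, which removes both the temporary `ArrayList` and the per-decl arena copy. A reduced sketch of the shape of that API, assuming a toy pool (the real `getOrPutTrailingString` also deduplicates against the pool's string table):

    const std = @import("std");

    // Hypothetical reduced pool; only the "trailing string" shape is shown.
    const Pool = struct {
        string_bytes: std.ArrayListUnmanaged(u8) = .{},

        // Interns the last `len` bytes of string_bytes in place.
        fn getOrPutTrailingString(pool: *Pool, gpa: std.mem.Allocator, len: usize) !u32 {
            const start = pool.string_bytes.items.len - len;
            try pool.string_bytes.append(gpa, 0); // null terminator
            return @intCast(u32, start);
        }
    };

    fn fullyQualifiedName(pool: *Pool, gpa: std.mem.Allocator) !u32 {
        const start = pool.string_bytes.items.len;
        // Render straight into the pool; no intermediate buffer.
        try pool.string_bytes.writer(gpa).print("{s}.{s}", .{ "std", "ArrayList" });
        return pool.getOrPutTrailingString(gpa, pool.string_bytes.items.len - start);
    }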
@@ -4507,28 +4455,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.analysis = .in_progress; - // We need the memory for the Type to go into the arena for the Decl - var decl_arena = std.heap.ArenaAllocator.init(gpa); - const decl_arena_allocator = decl_arena.allocator(); - const decl_value_arena = blk: { - errdefer decl_arena.deinit(); - const s = try decl_arena_allocator.create(ValueArena); - s.* = .{ .state = undefined }; - break :blk s; - }; - defer { - if (decl.value_arena) |value_arena| { - assert(value_arena.state_acquired == null); - decl_value_arena.prev = value_arena; - } - - decl_value_arena.state = decl_arena.state; - decl.value_arena = decl_value_arena; - } - var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - const analysis_arena_allocator = analysis_arena.allocator(); var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); defer comptime_mutable_decls.deinit(); @@ -4536,8 +4464,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = analysis_arena_allocator, - .perm_arena = decl_arena_allocator, + .arena = analysis_arena.allocator(), .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -4551,7 +4478,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { defer sema.deinit(); if (mod.declIsRoot(decl_index)) { - log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; const struct_index = decl.getOwnedStructIndex(mod).unwrap().?; const struct_obj = mod.structPtr(struct_index); @@ -4563,7 +4489,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.generation = mod.generation; return false; } - log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); @@ -4619,7 +4544,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.ty = InternPool.Index.type_type.toType(); decl.val = ty.toValue(); decl.@"align" = 0; - decl.@"linksection" = null; + decl.@"linksection" = .none; decl.has_tv = true; decl.owns_tv = false; decl.analysis = .complete; @@ -4646,7 +4571,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.clearValues(mod); decl.ty = decl_tv.ty; - decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); // linksection, align, and addrspace were already set by Sema decl.has_tv = true; decl.owns_tv = owns_tv; @@ -4660,7 +4585,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return sema.fail(&block_scope, export_src, "export of inline function", .{}); } // The scope needs to have the decl in it. 
- const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; + const options: std.builtin.ExportOptions = .{ + .name = mod.intern_pool.stringToSlice(decl.name), + }; try sema.analyzeExport(&block_scope, export_src, options, decl_index); } return type_changed or is_inline != prev_is_inline; @@ -4693,14 +4620,13 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .func => {}, else => { - log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); queue_linker_work = true; }, }, } decl.ty = decl_tv.ty; - decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); decl.@"align" = blk: { const align_ref = decl.zirAlignRef(mod); if (align_ref == .none) break :blk 0; @@ -4708,14 +4634,15 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }; decl.@"linksection" = blk: { const linksection_ref = decl.zirLinksectionRef(mod); - if (linksection_ref == .none) break :blk null; + if (linksection_ref == .none) break :blk .none; const bytes = try sema.resolveConstString(&block_scope, section_src, linksection_ref, "linksection must be comptime-known"); if (mem.indexOfScalar(u8, bytes, 0) != null) { return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{}); } else if (bytes.len == 0) { return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); } - break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; + const section = try mod.intern_pool.getOrPutString(gpa, bytes); + break :blk section.toOptional(); }; decl.@"addrspace" = blk: { const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { @@ -4743,7 +4670,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { (queue_linker_work and try sema.typeHasRuntimeBits(decl.ty)); if (has_runtime_bits) { - log.debug("queue linker work for {*} ({s})", .{ decl, decl.name }); // Needed for codegen_decl which will call updateDecl and then the // codegen backend wants full access to the Decl Type. @@ -4759,7 +4685,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; // The scope needs to have the decl in it. - const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; + const options: std.builtin.ExportOptions = .{ + .name = mod.intern_pool.stringToSlice(decl.name), + }; try sema.analyzeExport(&block_scope, export_src, options, decl_index); } @@ -4785,10 +4713,6 @@ pub fn declareDeclDependencyType(mod: *Module, depender_index: Decl.Index, depen } } - log.debug("{*} ({s}) depends on {*} ({s})", .{ - depender, depender.name, dependee, dependee.name, - }); - if (dependee.deletion_flag) { dependee.deletion_flag = false; assert(mod.deletion_set.swapRemove(dependee_index)); @@ -5138,6 +5062,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err const namespace = mod.namespacePtr(namespace_index); const gpa = mod.gpa; const zir = namespace.file_scope.zir; + const ip = &mod.intern_pool; // zig fmt: off const is_pub = (flags & 0b0001) != 0; @@ -5157,31 +5082,31 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err // Every Decl needs a name. 
var is_named_test = false; var kind: Decl.Kind = .named; - const decl_name: [:0]const u8 = switch (decl_name_index) { + const decl_name: InternPool.NullTerminatedString = switch (decl_name_index) { 0 => name: { if (export_bit) { const i = iter.usingnamespace_index; iter.usingnamespace_index += 1; kind = .@"usingnamespace"; - break :name try std.fmt.allocPrintZ(gpa, "usingnamespace_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "usingnamespace_{d}", .{i}); } else { const i = iter.comptime_index; iter.comptime_index += 1; kind = .@"comptime"; - break :name try std.fmt.allocPrintZ(gpa, "comptime_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "comptime_{d}", .{i}); } }, 1 => name: { const i = iter.unnamed_test_index; iter.unnamed_test_index += 1; kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "test_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "test_{d}", .{i}); }, 2 => name: { is_named_test = true; const test_name = zir.nullTerminatedString(decl_doccomment_index); kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "decltest.{s}", .{test_name}); + break :name try ip.getOrPutStringFmt(gpa, "decltest.{s}", .{test_name}); }, else => name: { const raw_name = zir.nullTerminatedString(decl_name_index); @@ -5189,14 +5114,12 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err is_named_test = true; const test_name = zir.nullTerminatedString(decl_name_index + 1); kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "test.{s}", .{test_name}); + break :name try ip.getOrPutStringFmt(gpa, "test.{s}", .{test_name}); } else { - break :name try gpa.dupeZ(u8, raw_name); + break :name try ip.getOrPutString(gpa, raw_name); } }, }; - var must_free_decl_name = true; - defer if (must_free_decl_name) gpa.free(decl_name); const is_exported = export_bit and decl_name_index != 0; if (kind == .@"usingnamespace") try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1); @@ -5204,7 +5127,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err // We create a Decl for it regardless of analysis status. 
const gop = try namespace.decls.getOrPutContextAdapted( gpa, - @as([]const u8, mem.sliceTo(decl_name, 0)), + decl_name, DeclAdapter{ .mod = mod }, Namespace.DeclContext{ .module = mod }, ); @@ -5214,11 +5137,9 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err const new_decl = mod.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; - must_free_decl_name = false; if (kind == .@"usingnamespace") { namespace.usingnamespace_set.putAssumeCapacity(new_decl_index, is_pub); } - log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace }); new_decl.src_line = line; gop.key_ptr.* = new_decl_index; // Exported decls, comptime decls, usingnamespace decls, and @@ -5239,7 +5160,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err if (!comp.bin_file.options.is_test) break :blk false; if (decl_pkg != mod.main_pkg) break :blk false; if (comp.test_filter) |test_filter| { - if (mem.indexOf(u8, decl_name, test_filter) == null) { + if (mem.indexOf(u8, ip.stringToSlice(decl_name), test_filter) == null) { break :blk false; } } @@ -5270,7 +5191,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err gpa, src_loc, "duplicate test name: {s}", - .{decl_name}, + .{ip.stringToSlice(decl_name)}, ); errdefer msg.destroy(gpa); try mod.failed_decls.putNoClobber(gpa, decl_index, msg); @@ -5281,7 +5202,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err }; try mod.errNoteNonLazy(other_src_loc, msg, "other test here", .{}); } - log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace }); // Update the AST node of the decl; even if its contents are unchanged, it may // have been re-ordered. decl.src_node = decl_node; @@ -5315,7 +5235,6 @@ pub fn clearDecl( defer tracy.end(); const decl = mod.declPtr(decl_index); - log.debug("clearing {*} ({s})", .{ decl, decl.name }); const gpa = mod.gpa; try mod.deletion_set.ensureUnusedCapacity(gpa, decl.dependencies.count()); @@ -5330,9 +5249,6 @@ pub fn clearDecl( const dep = mod.declPtr(dep_index); dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { - log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ - decl, decl.name, dep, dep.name, - }); // We don't recursively perform a deletion here, because during the update, // another reference to it may turn up. dep.deletion_flag = true; @@ -5387,7 +5303,6 @@ pub fn clearDecl( /// This function is exclusively called for anonymous decls. pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); - log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); assert(!mod.declIsRoot(decl_index)); assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); @@ -5415,7 +5330,6 @@ fn markDeclForDeletion(mod: *Module, decl_index: Decl.Index) !void { /// If other decls depend on this decl, they must be aborted first. 
pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); - log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name }); assert(!mod.declIsRoot(decl_index)); assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); @@ -5468,21 +5382,20 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void } } if (mod.comp.bin_file.cast(link.File.Elf)) |elf| { - elf.deleteDeclExport(decl_index, exp.options.name); + elf.deleteDeclExport(decl_index, exp.name); } if (mod.comp.bin_file.cast(link.File.MachO)) |macho| { - try macho.deleteDeclExport(decl_index, exp.options.name); + try macho.deleteDeclExport(decl_index, exp.name); } if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| { wasm.deleteDeclExport(decl_index); } if (mod.comp.bin_file.cast(link.File.Coff)) |coff| { - coff.deleteDeclExport(decl_index, exp.options.name); + coff.deleteDeclExport(decl_index, exp.name); } if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| { failed_kv.value.destroy(mod.gpa); } - mod.gpa.free(exp.options.name); mod.gpa.destroy(exp); } export_owners.deinit(mod.gpa); @@ -5497,11 +5410,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); - // Use the Decl's arena for captured values. - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); defer comptime_mutable_decls.deinit(); @@ -5512,7 +5420,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE .mod = mod, .gpa = gpa, .arena = arena, - .perm_arena = decl_arena_allocator, .code = decl.getFileScope(mod).zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -5616,7 +5523,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE } func.state = .in_progress; - log.debug("set {s} to in_progress", .{decl.name}); const last_arg_index = inner_block.instructions.items.len; @@ -5677,7 +5583,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index; func.state = .success; - log.debug("set {s} to success", .{decl.name}); // Finally we must resolve the return type and parameter types so that backends // have full access to type information. @@ -5724,7 +5629,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { const decl = mod.declPtr(decl_index); - log.debug("mark outdated {*} ({s})", .{ decl, decl.name }); try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl_index }); if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(mod.gpa); @@ -5821,7 +5725,7 @@ pub fn allocateNewDecl( .ty = undefined, .val = undefined, .@"align" = undefined, - .@"linksection" = undefined, + .@"linksection" = .none, .@"addrspace" = .generic, .analysis = .unreferenced, .deletion_flag = false, @@ -5839,25 +5743,20 @@ pub fn allocateNewDecl( return decl_and_index.decl_index; } -/// Get error value for error tag `name`. 
-pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged(ErrorInt).KV { +pub fn getErrorValue( + mod: *Module, + name: InternPool.NullTerminatedString, +) Allocator.Error!ErrorInt { const gop = try mod.global_error_set.getOrPut(mod.gpa, name); - if (gop.found_existing) { - return std.StringHashMapUnmanaged(ErrorInt).KV{ - .key = gop.key_ptr.*, - .value = gop.value_ptr.*, - }; - } + return @intCast(ErrorInt, gop.index); +} - errdefer assert(mod.global_error_set.remove(name)); - try mod.error_name_list.ensureUnusedCapacity(mod.gpa, 1); - gop.key_ptr.* = try mod.gpa.dupe(u8, name); - gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len); - mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*); - return std.StringHashMapUnmanaged(ErrorInt).KV{ - .key = gop.key_ptr.*, - .value = gop.value_ptr.*, - }; +pub fn getErrorValueFromSlice( + mod: *Module, + name: []const u8, +) Allocator.Error!ErrorInt { + const interned_name = try mod.intern_pool.getOrPutString(mod.gpa, name); + return getErrorValue(mod, interned_name); } pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index { @@ -5874,24 +5773,23 @@ pub fn createAnonymousDeclFromDecl( ) !Decl.Index { const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope); errdefer mod.destroyDecl(new_decl_index); - const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{ - src_decl.name, @enumToInt(new_decl_index), + const ip = &mod.intern_pool; + const name = try ip.getOrPutStringFmt(mod.gpa, "{s}__anon_{d}", .{ + ip.stringToSlice(src_decl.name), @enumToInt(new_decl_index), }); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); return new_decl_index; } -/// Takes ownership of `name` even if it returns an error. pub fn initNewAnonDecl( mod: *Module, new_decl_index: Decl.Index, src_line: u32, namespace: Namespace.Index, typed_value: TypedValue, - name: [:0]u8, + name: InternPool.NullTerminatedString, ) Allocator.Error!void { assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern())); - errdefer mod.gpa.free(name); const new_decl = mod.declPtr(new_decl_index); @@ -5900,7 +5798,7 @@ pub fn initNewAnonDecl( new_decl.ty = typed_value.ty; new_decl.val = typed_value.val; new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; @@ -6330,12 +6228,11 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { // deletion set at this time. for (file.deleted_decls.items) |decl_index| { const decl = mod.declPtr(decl_index); - log.debug("deleted from source: {*} ({s})", .{ decl, decl.name }); // Remove from the namespace it resides in, preserving declaration order. assert(decl.zir_decl_index != 0); _ = mod.namespacePtr(decl.src_namespace).decls.orderedRemoveAdapted( - @as([]const u8, mem.sliceTo(decl.name, 0)), + decl.name, DeclAdapter{ .mod = mod }, ); @@ -6357,7 +6254,7 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { pub fn processExports(mod: *Module) !void { const gpa = mod.gpa; // Map symbol names to `Export` for name collision detection. 
- var symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{}; + var symbol_exports: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, *Export) = .{}; defer symbol_exports.deinit(gpa); var it = mod.decl_exports.iterator(); @@ -6365,13 +6262,13 @@ pub fn processExports(mod: *Module) !void { const exported_decl = entry.key_ptr.*; const exports = entry.value_ptr.items; for (exports) |new_export| { - const gop = try symbol_exports.getOrPut(gpa, new_export.options.name); + const gop = try symbol_exports.getOrPut(gpa, new_export.name); if (gop.found_existing) { new_export.status = .failed_retryable; try mod.failed_exports.ensureUnusedCapacity(gpa, 1); const src_loc = new_export.getSrcLoc(mod); const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{ - new_export.options.name, + mod.intern_pool.stringToSlice(new_export.name), }); errdefer msg.destroy(gpa); const other_export = gop.value_ptr.*; @@ -6408,8 +6305,9 @@ pub fn populateTestFunctions( const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); const builtin_namespace = mod.namespacePtr(root_decl.src_namespace); + const test_functions_str = try mod.intern_pool.getOrPutString(gpa, "test_functions"); const decl_index = builtin_namespace.decls.getKeyAdapted( - @as([]const u8, "test_functions"), + test_functions_str, DeclAdapter{ .mod = mod }, ).?; { @@ -6443,7 +6341,7 @@ pub fn populateTestFunctions( for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - const test_decl_name = mem.span(test_decl.name); + const test_decl_name = mod.intern_pool.stringToSlice(test_decl.name); const test_name_decl_index = n: { const test_name_decl_ty = try mod.arrayType(.{ .len = test_decl_name.len, @@ -7156,7 +7054,7 @@ pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc return mod.declPtr(opaque_type.decl).srcLoc(mod); } -pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) ![:0]u8 { +pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) !InternPool.NullTerminatedString { return mod.declPtr(opaque_type.decl).getFullyQualifiedName(mod); } diff --git a/src/Sema.zig b/src/Sema.zig index 620a8a6a28a3..da8878ed4d59 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -11,9 +11,6 @@ gpa: Allocator, /// Points to the temporary arena allocator of the Sema. /// This arena will be cleared when the sema is destroyed. arena: Allocator, -/// Points to the arena allocator for the owner_decl. -/// This arena will persist until the decl is invalidated. 
-perm_arena: Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, @@ -740,7 +737,6 @@ pub const Block = struct { // TODO: migrate Decl alignment to use `InternPool.Alignment` new_decl.@"align" = @intCast(u32, alignment); errdefer sema.mod.abortAnonDecl(new_decl_index); - try new_decl.finalizeNewArena(&wad.new_decl_arena); wad.finished = true; try sema.mod.finalizeAnonDecl(new_decl_index); return new_decl_index; @@ -1825,6 +1821,20 @@ pub fn resolveConstString( return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); } +pub fn resolveConstStringIntern( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + zir_ref: Zir.Inst.Ref, + reason: []const u8, +) !InternPool.NullTerminatedString { + const air_inst = try sema.resolveInst(zir_ref); + const wanted_type = Type.slice_const_u8; + const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); + const val = try sema.resolveConstValue(block, src, coerced_inst, reason); + return val.toIpString(wanted_type, sema.mod); +} + pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { const air_inst = try sema.resolveInst(zir_ref); assert(air_inst != .var_args_param_type); @@ -1847,11 +1857,13 @@ fn analyzeAsType( pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; if (!mod.backendSupportsFeature(.error_return_trace)) return; assert(!block.is_comptime); var err_trace_block = block.makeSubBlock(); - defer err_trace_block.instructions.deinit(sema.gpa); + defer err_trace_block.instructions.deinit(gpa); const src: LazySrcLoc = .unneeded; @@ -1866,17 +1878,19 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; - const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true); + const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses"); + const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true); try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store); // st.index = 0; - const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "index", src, true); + const index_field_name = try ip.getOrPutString(gpa, "index"); + const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true); try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store); // @errorReturnTrace() = &st; _ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr); - try block.instructions.insertSlice(sema.gpa, last_arg_index, err_trace_block.instructions.items); + try block.instructions.insertSlice(gpa, last_arg_index, err_trace_block.instructions.items); } /// May return Value Tags: `variable`, `undef`. 
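Annotation: from this point on, Sema interns field and decl names before using them: `setupErrorReturnTrace` above interns "instruction_addresses" and "index" before calling `fieldPtr`, and the new `resolveConstStringIntern` returns a handle instead of an allocated slice. This is sound because interning is idempotent: re-interning the same bytes yields the same handle, so the repeated `getOrPutString(gpa, "index")` calls in the hunks below are cheap and the resulting handles compare equal. A small self-contained sketch of that property, with a hypothetical `Pool` standing in for the real InternPool:

    const std = @import("std");

    const Str = enum(u32) { _ };

    // Hypothetical reduced pool: getOrPutString maps equal bytes to the same
    // handle, so names can be re-interned freely.
    const Pool = struct {
        map: std.StringArrayHashMapUnmanaged(void) = .{},

        fn getOrPutString(pool: *Pool, gpa: std.mem.Allocator, s: []const u8) !Str {
            const gop = try pool.map.getOrPut(gpa, s);
            return @intToEnum(Str, @intCast(u32, gop.index));
        }
    };

    test "re-interning a field name returns the same handle" {
        const gpa = std.testing.allocator;
        var pool: Pool = .{};
        defer pool.map.deinit(gpa);
        const a = try pool.getOrPutString(gpa, "index");
        const b = try pool.getOrPutString(gpa, "index");
        try std.testing.expect(a == b);
    }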
@@ -2179,7 +2193,13 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError return sema.failWithOwnedErrorMsg(msg); } -fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { +fn failWithInvalidFieldAccess( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + object_ty: Type, + field_name: InternPool.NullTerminatedString, +) CompileError { const mod = sema.mod; const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty; @@ -2207,15 +2227,16 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); } -fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) bool { +fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool { + const ip = &mod.intern_pool; switch (ty.zigTypeTag(mod)) { - .Array => return mem.eql(u8, field_name, "len"), + .Array => return ip.stringEqlSlice(field_name, "len"), .Pointer => { const ptr_info = ty.ptrInfo(mod); if (ptr_info.size == .Slice) { - return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); + return ip.stringEqlSlice(field_name, "ptr") or ip.stringEqlSlice(field_name, "len"); } else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { - return mem.eql(u8, field_name, "len"); + return ip.stringEqlSlice(field_name, "len"); } else return false; }, .Type, .Struct, .Union => return true, @@ -2308,19 +2329,19 @@ pub fn fail( fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { @setCold(true); const gpa = sema.gpa; + const mod = sema.mod; - if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) { + if (crash_report.is_enabled and mod.comp.debug_compile_errors) { if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation; var wip_errors: std.zig.ErrorBundle.Wip = undefined; wip_errors.init(gpa) catch unreachable; - Compilation.addModuleErrorMsg(&wip_errors, err_msg.*) catch unreachable; + Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable; std.debug.print("compile error during Sema:\n", .{}); var error_bundle = wip_errors.toOwnedBundle("") catch unreachable; error_bundle.renderToStdErr(.{ .ttyconf = .no_color }); crash_report.compilerPanic("unexpected compile error occurred", null, null); } - const mod = sema.mod; ref: { errdefer err_msg.destroy(gpa); if (err_msg.src_loc.lazy == .unneeded) { @@ -2330,9 +2351,9 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { try mod.failed_files.ensureUnusedCapacity(gpa, 1); const max_references = blk: { - if (sema.mod.comp.reference_trace) |num| break :blk num; + if (mod.comp.reference_trace) |num| break :blk num; // Do not add multiple traces without explicit request. 
-            if (sema.mod.failed_decls.count() != 0) break :ref;
+            if (mod.failed_decls.count() != 0) break :ref;
             break :blk default_reference_trace_len;
         };
@@ -2350,13 +2371,16 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
             if (gop.found_existing) break;
             if (cur_reference_trace < max_references) {
                 const decl = sema.mod.declPtr(ref.referencer);
-                try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl, mod) });
+                try reference_stack.append(.{
+                    .decl = decl.name.toOptional(),
+                    .src_loc = ref.src.toSrcLoc(decl, mod),
+                });
             }
             referenced_by = ref.referencer;
         }
         if (sema.mod.comp.reference_trace == null and cur_reference_trace > 0) {
             try reference_stack.append(.{
-                .decl = null,
+                .decl = .none,
                 .src_loc = undefined,
                 .hidden = 0,
             });
@@ -2795,7 +2819,6 @@ fn zirStructDecl(
     new_namespace.ty = struct_ty.toType();
 
     try sema.analyzeStructDecl(new_decl, inst, struct_index);
-    try new_decl.finalizeNewArena(&new_decl_arena);
     const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
     try mod.finalizeAnonDecl(new_decl_index);
     return decl_val;
@@ -2812,6 +2835,7 @@ fn createAnonymousDeclTypeNamed(
 ) !Decl.Index {
     const mod = sema.mod;
     const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
     const namespace = block.namespace;
     const src_scope = block.wip_capture_scope;
     const src_decl = mod.declPtr(block.src_decl);
@@ -2827,16 +2851,19 @@ fn createAnonymousDeclTypeNamed(
             // semantically analyzed.
             // This name is also used as the key in the parent namespace so it cannot be
             // renamed.
-            const name = try std.fmt.allocPrintZ(gpa, "{s}__{s}_{d}", .{
-                src_decl.name, anon_prefix, @enumToInt(new_decl_index),
-            });
-            errdefer gpa.free(name);
+
+            // This ensureUnusedCapacity protects the src_decl name slice, which points
+            // into string_bytes, from being invalidated when `getOrPutStringFmt` grows it.
+            try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(src_decl.name).len +
+                anon_prefix.len + 20);
+            const name = ip.getOrPutStringFmt(gpa, "{s}__{s}_{d}", .{
+                ip.stringToSlice(src_decl.name), anon_prefix, @enumToInt(new_decl_index),
+            }) catch unreachable;
             try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
             return new_decl_index;
         },
         .parent => {
-            const name = try gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0));
-            errdefer gpa.free(name);
+            const name = mod.declPtr(block.src_decl).name;
             try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
             return new_decl_index;
         },
@@ -2846,7 +2873,7 @@ fn createAnonymousDeclTypeNamed(
             var buf = std.ArrayList(u8).init(gpa);
             defer buf.deinit();
 
-            try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0));
+            try buf.appendSlice(ip.stringToSlice(mod.declPtr(block.src_decl).name));
             try buf.appendSlice("(");
 
             var arg_i: usize = 0;
@@ -2871,8 +2898,7 @@ fn createAnonymousDeclTypeNamed(
             };
             try buf.appendSlice(")");
 
-            const name = try buf.toOwnedSliceSentinel(0);
-            errdefer gpa.free(name);
+            const name = try ip.getOrPutString(gpa, buf.items);
             try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
             return new_decl_index;
         },
@@ -2885,10 +2911,17 @@ fn createAnonymousDeclTypeNamed(
             .dbg_var_ptr, .dbg_var_val => {
                 if (zir_data[i].str_op.operand != ref) continue;
 
-                const name = try std.fmt.allocPrintZ(gpa, "{s}.{s}", .{
-                    src_decl.name, zir_data[i].str_op.getStr(sema.code),
-                });
-                errdefer gpa.free(name);
+                // This ensureUnusedCapacity protects the src_decl name slice,
+                // which points into string_bytes, from being invalidated when
+                // `getOrPutStringFmt` grows the buffer.
+                const zir_str = zir_data[i].str_op.getStr(sema.code);
+                try ip.string_bytes.ensureUnusedCapacity(
+                    gpa,
+                    ip.stringToSlice(src_decl.name).len + zir_str.len + 10,
+                );
+                const name = ip.getOrPutStringFmt(gpa, "{s}.{s}", .{
+                    ip.stringToSlice(src_decl.name), zir_str,
+                }) catch unreachable;
 
                 try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
                 return new_decl_index;
@@ -3249,7 +3282,6 @@ fn zirUnionDecl(
 
     _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
 
-    try new_decl.finalizeNewArena(&new_decl_arena);
     const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
     try mod.finalizeAnonDecl(new_decl_index);
     return decl_val;
@@ -3315,7 +3347,6 @@ fn zirOpaqueDecl(
 
     extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
 
-    try new_decl.finalizeNewArena(&new_decl_arena);
    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
     try mod.finalizeAnonDecl(new_decl_index);
     return decl_val;
@@ -3344,8 +3375,8 @@ fn zirErrorSetDecl(
     while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string
         const str_index = sema.code.extra[extra_index];
         const name = sema.code.nullTerminatedString(str_index);
-        const kv = try mod.getErrorValue(name);
-        const name_ip = try mod.intern_pool.getOrPutString(gpa, kv.key);
+        const name_ip = try mod.intern_pool.getOrPutString(gpa, name);
+        _ = try mod.getErrorValue(name_ip);
         const result = names.getOrPutAssumeCapacity(name_ip);
         assert(!result.found_existing); // verified in AstGen
     }
@@ -3512,7 +3543,8 @@ fn indexablePtrLen(
     const is_pointer_to = object_ty.isSinglePointer(mod);
     const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty;
     try checkIndexable(sema, block, src, indexable_ty);
-    return
sema.fieldVal(block, src, object, "len", src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len"); + return sema.fieldVal(block, src, object, field_name, src); } fn indexablePtrLenOrNone( @@ -3525,7 +3557,8 @@ fn indexablePtrLenOrNone( const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); if (operand_ty.ptrSize(mod) == .Many) return .none; - return sema.fieldVal(block, src, operand, "len", src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len"); + return sema.fieldVal(block, src, operand, field_name, src); } fn zirAllocExtended( @@ -4079,6 +4112,7 @@ fn zirFieldBasePtr( fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.operands_len); @@ -4122,7 +4156,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } if (!object_ty.indexableHasLen(mod)) continue; - break :l try sema.fieldVal(block, arg_src, object, "len", arg_src); + break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len"), arg_src); }; const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); if (len == .none) { @@ -4308,6 +4342,7 @@ fn validateUnionInit( union_ptr: Air.Inst.Ref, ) CompileError!void { const mod = sema.mod; + const gpa = sema.gpa; if (instrs.len != 1) { const msg = msg: { @@ -4317,7 +4352,7 @@ fn validateUnionInit( "cannot initialize multiple union fields at once; unions can only have one active field", .{}, ); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); for (instrs[1..]) |inst| { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -4341,7 +4376,7 @@ fn validateUnionInit( const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_ptr_extra.field_name_start)); // Validate the field access but ignore the index since we want the tag enum field index. _ = try sema.unionFieldIndex(block, union_ty, field_name, field_src); const air_tags = sema.air_instructions.items(.tag); @@ -4444,6 +4479,7 @@ fn validateStructInit( ) CompileError!void { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; // Maps field index to field_ptr index of where it was already initialized. 
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod)); @@ -4457,7 +4493,10 @@ fn validateStructInit( const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; struct_ptr_zir_ref = field_ptr_extra.lhs; - const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); + const field_name = try ip.getOrPutString( + gpa, + sema.code.nullTerminatedString(field_ptr_extra.field_name_start), + ); const field_index = if (struct_ty.isTuple(mod)) try sema.tupleFieldIndex(block, struct_ty, field_name, field_src) else @@ -4504,7 +4543,7 @@ fn validateStructInit( } const field_name = struct_ty.structFieldName(i, mod); const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4525,8 +4564,7 @@ fn validateStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(mod); - defer gpa.free(fqn); + const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); try mod.errNoteNonLazy( struct_obj.srcLoc(mod), msg, @@ -4649,7 +4687,7 @@ fn validateStructInit( } const field_name = struct_ty.structFieldName(i, mod); const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4662,10 +4700,9 @@ fn validateStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); try sema.mod.errNoteNonLazy( - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), msg, "struct '{s}' declared here", .{fqn}, @@ -4949,7 +4986,7 @@ fn failWithBadMemberAccess( block: *Block, agg_ty: Type, field_src: LazySrcLoc, - field_name: []const u8, + field_name_nts: InternPool.NullTerminatedString, ) CompileError { const mod = sema.mod; const kw_name = switch (agg_ty.zigTypeTag(mod)) { @@ -4959,6 +4996,7 @@ fn failWithBadMemberAccess( .Enum => "enum", else => unreachable, }; + const field_name = mod.intern_pool.stringToSlice(field_name_nts); if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (sema.mod.declIsRoot(some)) { return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{ agg_ty.fmt(sema.mod), field_name, @@ -4980,22 +5018,23 @@ fn failWithBadStructFieldAccess( block: *Block, struct_obj: *Module.Struct, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; - const fqn = try struct_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); const msg = msg: { const msg = try sema.errMsg( block, field_src, "no field named '{s}' in struct '{s}'", - .{ field_name, fqn }, + .{ ip.stringToSlice(field_name), fqn }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(struct_obj.srcLoc(sema.mod), msg, "struct declared here", .{}); + try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{}); break :msg msg; }; return 
sema.failWithOwnedErrorMsg(msg); @@ -5006,22 +5045,23 @@ fn failWithBadUnionFieldAccess( block: *Block, union_obj: *Module.Union, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; - const fqn = try union_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); const msg = msg: { const msg = try sema.errMsg( block, field_src, "no field named '{s}' in union '{s}'", - .{ field_name, fqn }, + .{ ip.stringToSlice(field_name), fqn }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(union_obj.srcLoc(sema.mod), msg, "union declared here", .{}); + try mod.errNoteNonLazy(union_obj.srcLoc(mod), msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -5772,7 +5812,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const decl_name = sema.code.nullTerminatedString(extra.decl_name); + const decl_name = try mod.intern_pool.getOrPutString(mod.gpa, sema.code.nullTerminatedString(extra.decl_name)); const decl_index = if (extra.namespace != .none) index_blk: { const container_ty = try sema.resolveType(block, operand_src, extra.namespace); const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?; @@ -5875,19 +5915,14 @@ pub fn analyzeExport( const new_export = try gpa.create(Export); errdefer gpa.destroy(new_export); - const symbol_name = try gpa.dupe(u8, borrowed_options.name); - errdefer gpa.free(symbol_name); - - const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; - errdefer if (section) |s| gpa.free(s); + const symbol_name = try mod.intern_pool.getOrPutString(gpa, borrowed_options.name); + const section = try mod.intern_pool.getOrPutStringOpt(gpa, borrowed_options.section); new_export.* = .{ - .options = .{ - .name = symbol_name, - .linkage = borrowed_options.linkage, - .section = section, - .visibility = borrowed_options.visibility, - }, + .name = symbol_name, + .linkage = borrowed_options.linkage, + .section = section, + .visibility = borrowed_options.visibility, .src = src, .owner_decl = sema.owner_decl_index, .src_decl = block.src_decl, @@ -6121,23 +6156,25 @@ fn addDbgVar( } fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); - const decl_name = inst_data.get(sema.code); + const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); const decl_index = try sema.lookupIdentifier(block, src, decl_name); try sema.addReferencedBy(block, src, decl_index); return sema.analyzeDeclRef(decl_index); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); - const decl_name = inst_data.get(sema.code); + const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); const decl = try sema.lookupIdentifier(block, src, decl_name); return sema.analyzeDeclVal(block, src, decl); } -fn 
lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !Decl.Index { +fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !Decl.Index { const mod = sema.mod; var namespace = block.namespace; while (true) { @@ -6156,7 +6193,7 @@ fn lookupInNamespace( block: *Block, src: LazySrcLoc, namespace_index: Namespace.Index, - ident_name: []const u8, + ident_name: InternPool.NullTerminatedString, observe_usingnamespace: bool, ) CompileError!?Decl.Index { const mod = sema.mod; @@ -6249,9 +6286,6 @@ fn lookupInNamespace( return decl_index; } - log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{ - sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name, - }); // TODO This dependency is too strong. Really, it should only be a dependency // on the non-existence of `ident_name` in the namespace. We can lessen the number of // outdated declarations by making this dependency more sophisticated. @@ -6276,10 +6310,12 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const src = sema.src; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return .none; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return .none; + if (!mod.backendSupportsFeature(.error_return_trace)) return .none; + if (!mod.comp.bin_file.options.error_return_tracing) return .none; if (block.is_comptime) return .none; @@ -6292,7 +6328,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, else => |e| return e, }; - const field_index = sema.structFieldIndex(block, stack_trace_ty, "index", src) catch |err| switch (err) { + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, src) catch |err| switch (err) { error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, else => |e| return e, }; @@ -6316,6 +6353,7 @@ fn popErrorReturnTrace( saved_error_trace_index: Air.Inst.Ref, ) CompileError!void { const mod = sema.mod; + const gpa = sema.gpa; var is_non_error: ?bool = null; var is_non_error_inst: Air.Inst.Ref = undefined; if (operand != .none) { @@ -6332,13 +6370,14 @@ fn popErrorReturnTrace( const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true); + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); } else if (is_non_error == null) { // The result might be an error. If it is, we leave the error trace alone. If it isn't, we need // to pop any error trace that may have been propagated from our arguments. 
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len); const cond_block_inst = try block.addInstAsIndex(.{ .tag = .block, .data = .{ @@ -6350,28 +6389,29 @@ fn popErrorReturnTrace( }); var then_block = block.makeSubBlock(); - defer then_block.instructions.deinit(sema.gpa); + defer then_block.instructions.deinit(gpa); // If non-error, then pop the error return trace by restoring the index. const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true); + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); _ = try then_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); // Otherwise, do nothing var else_block = block.makeSubBlock(); - defer else_block.instructions.deinit(sema.gpa); + defer else_block.instructions.deinit(gpa); _ = try else_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); - try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.CondBr).Struct.fields.len + + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); - try sema.air_instructions.append(sema.gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ + try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = is_non_error_inst, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(u32, then_block.instructions.items.len), @@ -6414,7 +6454,7 @@ fn zirCall( .direct => .{ .direct = try sema.resolveInst(extra.data.callee) }, .field => blk: { const object_ptr = try sema.resolveInst(extra.data.obj_ptr); - const field_name = sema.code.nullTerminatedString(extra.data.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.data.field_name_start)); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; break :blk try sema.fieldCallBind(block, callee_src, object_ptr, field_name, field_name_src); }, @@ -6509,7 +6549,8 @@ fn zirCall( if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) { const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index"); + const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); // Insert a save instruction before the arg resolution + call instructions we just generated const save_inst = try block.insertInst(block_index, .{ @@ -7436,9 
+7477,10 @@ fn instantiateGenericCall( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + const module_fn = mod.funcPtr(switch (ip.indexToKey(func_val.toIntern())) { .func => |function| function.index, .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?, else => unreachable, @@ -7567,9 +7609,12 @@ fn instantiateGenericCall( const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); // TODO better names for generic function instantiations - const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ - fn_owner_decl.name, @enumToInt(new_decl_index), - }); + // The ensureUnusedCapacity here protects against fn_owner_decl.name slice being + // reallocated during getOrPutStringFmt. + try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(fn_owner_decl.name).len + 20); + const decl_name = ip.getOrPutStringFmt(gpa, "{s}__anon_{d}", .{ + ip.stringToSlice(fn_owner_decl.name), @enumToInt(new_decl_index), + }) catch unreachable; new_decl.name = decl_name; new_decl.src_line = fn_owner_decl.src_line; new_decl.is_pub = fn_owner_decl.is_pub; @@ -7590,12 +7635,8 @@ fn instantiateGenericCall( assert(new_decl.dependencies.keys().len == 0); try mod.declareDeclDependencyType(new_decl_index, module_fn.owner_decl, .function_body); - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const new_func = sema.resolveGenericInstantiationType( block, - new_decl_arena_allocator, fn_zir, new_decl, new_decl_index, @@ -7608,7 +7649,6 @@ fn instantiateGenericCall( bound_arg_src, ) catch |err| switch (err) { error.GenericPoison, error.ComptimeReturn => { - new_decl_arena.deinit(); // Resolving the new function type below will possibly declare more decl dependencies // and so we remove them all here in case of error. for (new_decl.dependencies.keys()) |dep_index| { @@ -7623,10 +7663,6 @@ fn instantiateGenericCall( }, else => { assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); - { - errdefer new_decl_arena.deinit(); - try new_decl.finalizeNewArena(&new_decl_arena); - } // TODO look up the compile error that happened here and attach a note to it // pointing here, at the generic instantiation callsite. 
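The `ensureUnusedCapacity` comment in the generic-instantiation hunk above guards an aliasing hazard: `fn_owner_decl.name` is now itself a slice into `ip.string_bytes`, and `getOrPutStringFmt` appends to that same buffer, so an unreserved append could reallocate and invalidate the format argument mid-write. A minimal sketch of the idiom (the `+ 20` headroom covers the `__anon_` suffix plus a decimal index):

    // Reserve space up front so string_bytes cannot reallocate while the
    // formatter is still reading ip.stringToSlice(fn_owner_decl.name).
    try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(fn_owner_decl.name).len + 20);
    const decl_name = ip.getOrPutStringFmt(gpa, "{s}__anon_{d}", .{
        ip.stringToSlice(fn_owner_decl.name), @enumToInt(new_decl_index),
    }) catch unreachable; // capacity reserved above, so allocation cannot fail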
if (sema.owner_func) |owner_func| { @@ -7637,9 +7673,7 @@ fn instantiateGenericCall( return err; }, }; - errdefer new_decl_arena.deinit(); - try new_decl.finalizeNewArena(&new_decl_arena); break :callee new_func; } else gop.key_ptr.*; const callee = mod.funcPtr(callee_index); @@ -7729,7 +7763,6 @@ fn instantiateGenericCall( fn resolveGenericInstantiationType( sema: *Sema, block: *Block, - new_decl_arena_allocator: Allocator, fn_zir: Zir, new_decl: *Decl, new_decl_index: Decl.Index, @@ -7755,7 +7788,6 @@ fn resolveGenericInstantiationType( .mod = mod, .gpa = gpa, .arena = sema.arena, - .perm_arena = new_decl_arena_allocator, .code = fn_zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, @@ -7764,7 +7796,8 @@ fn resolveGenericInstantiationType( .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none, - .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), + // TODO: fully migrate functions into InternPool + .comptime_args = try mod.tmp_hack_arena.allocator().alloc(TypedValue, uncasted_args.len), .comptime_args_fn_inst = module_fn.zir_body_inst, .preallocated_new_func = new_module_func.toOptional(), .is_generic_instantiation = true, @@ -7931,10 +7964,6 @@ fn resolveGenericInstantiationType( new_decl.owns_tv = true; new_decl.analysis = .complete; - log.debug("generic function '{s}' instantiated with type {}", .{ - new_decl.name, new_decl.ty.fmtDebug(), - }); - // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // will be populated, ensuring it will have `analyzeBody` called with the ZIR // parameters mapped appropriately. @@ -8134,13 +8163,13 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! _ = block; const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const name = inst_data.get(sema.code); + const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(name); // Create an error set type with only this error value, and return the value. 
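The error-value hunks here and below drop the old `getErrorValue` key/value round-trip: the name is interned first, `getErrorValue` registers it in `mod.global_error_set`, and since that set is an ordered map the error integer is simply the name's index. A minimal sketch of both directions, assuming an illustrative error name:

    const name = try mod.intern_pool.getOrPutString(sema.gpa, "OutOfMemory"); // illustrative name
    _ = try mod.getErrorValue(name); // registers the name in global_error_set
    // name -> integer: the name's position in the ordered set (index 0 is
    // reserved, hence the `int == 0` rejection in zirIntToError below)
    const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
    // integer -> name: index straight back into the keys
    const name_again = mod.global_error_set.keys()[int];
    std.debug.assert(name == name_again);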
- const kv = try sema.mod.getErrorValue(name); - const error_set_type = try mod.singleErrorSetType(kv.key); + const error_set_type = try mod.singleErrorSetTypeNts(name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + .name = name, } })).toValue()); } @@ -8162,7 +8191,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; return sema.addConstant(Type.err_int, try mod.intValue( Type.err_int, - (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value, + try mod.getErrorValue(err_name), )); } @@ -8173,8 +8202,8 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat switch (names.len) { 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), 1 => { - const name = mod.intern_pool.stringToSlice(names[0]); - return sema.addIntUnsigned(Type.err_int, mod.global_error_set.get(name).?); + const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(names[0]).?); + return sema.addIntUnsigned(Type.err_int, int); }, else => {}, } @@ -8197,11 +8226,11 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); - if (int > sema.mod.global_error_set.count() or int == 0) + if (int > mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); return sema.addConstant(Type.anyerror, (try mod.intern(.{ .err = .{ .ty = .anyerror_type, - .name = mod.intern_pool.getString(sema.mod.error_name_list.items[int]).unwrap().?, + .name = mod.global_error_set.keys()[int], } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -8917,7 +8946,7 @@ fn handleExternLibName( const FuncLinkSection = union(enum) { generic, default, - explicit: []const u8, + explicit: InternPool.NullTerminatedString, }; fn funcCommon( @@ -9186,9 +9215,9 @@ fn funcCommon( }; sema.owner_decl.@"linksection" = switch (section) { - .generic => undefined, - .default => null, - .explicit => |section_name| try sema.perm_arena.dupeZ(u8, section_name), + .generic => .none, + .default => .none, + .explicit => |section_name| section_name.toOptional(), }; sema.owner_decl.@"align" = alignment orelse 0; sema.owner_decl.@"addrspace" = address_space orelse .generic; @@ -9572,11 +9601,12 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start)); const object = try sema.resolveInst(extra.lhs); return sema.fieldVal(block, src, object, field_name, field_name_src); } @@ -9585,11 +9615,12 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index, initializing: b const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start)); const object_ptr = try sema.resolveInst(extra.lhs); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, initializing); } @@ -9603,7 +9634,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object = try sema.resolveInst(extra.lhs); - const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known"); return sema.fieldVal(block, src, object, field_name, field_name_src); } @@ -9616,7 +9647,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object_ptr = try sema.resolveInst(extra.lhs); - const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known"); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false); } @@ -10434,6 +10465,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const src_node_offset = inst_data.src_node; @@ -10605,7 +10637,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError i, msg, "unhandled enumeration value: '{s}'", - .{field_name}, + .{ip.stringToSlice(field_name)}, ); } try mod.errNoteNonLazy( @@ -10689,7 +10721,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa); for (operand_ty.errorSetNames(mod)) |error_name_ip| { - const error_name = mod.intern_pool.stringToSlice(error_name_ip); + const error_name = ip.stringToSlice(error_name_ip); if (!seen_errors.contains(error_name) and special_prong != .@"else") { const msg = maybe_msg orelse blk: { maybe_msg = try sema.errMsg( @@ -10758,7 +10790,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, error_names.len); for (error_names) |error_name_ip| { - const error_name = mod.intern_pool.stringToSlice(error_name_ip); + const error_name = ip.stringToSlice(error_name_ip); if (seen_errors.contains(error_name)) continue; names.putAssumeCapacityNoClobber(error_name_ip, {}); @@ -12062,7 +12094,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ty_src: 
LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs); - const field_name = try sema.resolveConstString(block, name_src, extra.rhs, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, "field name must be comptime-known"); const ty = try sema.resolveTypeFields(unresolved_ty); const ip = &mod.intern_pool; @@ -12070,19 +12102,17 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice => { - if (mem.eql(u8, field_name, "ptr")) break :hf true; - if (mem.eql(u8, field_name, "len")) break :hf true; + if (ip.stringEqlSlice(field_name, "ptr")) break :hf true; + if (ip.stringEqlSlice(field_name, "len")) break :hf true; break :hf false; }, else => {}, }, .anon_struct_type => |anon_struct| { if (anon_struct.names.len != 0) { - // If the string is not interned, then the field certainly is not present. - const name_interned = ip.getString(field_name).unwrap() orelse break :hf false; - break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, name_interned) != null; + break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, field_name) != null; } else { - const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; + const field_index = std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10) catch break :hf false; break :hf field_index < ty.structFieldCount(mod); } }, @@ -12097,11 +12127,9 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :hf union_obj.fields.contains(field_name); }, .enum_type => |enum_type| { - // If the string is not interned, then the field certainly is not present. - const name_interned = ip.getString(field_name).unwrap() orelse break :hf false; - break :hf enum_type.nameIndex(ip, name_interned) != null; + break :hf enum_type.nameIndex(ip, field_name) != null; }, - .array_type => break :hf mem.eql(u8, field_name, "len"), + .array_type => break :hf ip.stringEqlSlice(field_name, "len"), else => {}, } return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ @@ -12123,7 +12151,7 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const container_type = try sema.resolveType(block, lhs_src, extra.lhs); - const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "decl name must be comptime-known"); + const decl_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "decl name must be comptime-known"); try sema.checkNamespaceType(block, lhs_src, container_type); @@ -12218,14 +12246,12 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const err_name = inst_data.get(sema.code); - - // Return the error code from the function. 
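With the name interned, the `@hasField` hunk above tests literals via `ip.stringEqlSlice` and other interned names by plain handle equality; only the numeric tuple-index case still reaches for the bytes. The same literal interning recurs throughout the `zirTypeInfo` and `zirReify` hunks below. A condensed sketch of the dispatch (simplified; the real code also handles slices, unions, enums, and error sets):

    const has_field = switch (ip.indexToKey(ty.toIntern())) {
        // comparing an interned name against a literal, no slice materialized
        .array_type => ip.stringEqlSlice(field_name, "len"),
        .anon_struct_type => |t| if (t.names.len != 0)
            // interned vs interned: scalar comparison, no byte-wise scan
            std.mem.indexOfScalar(InternPool.NullTerminatedString, t.names, field_name) != null
        else blk: {
            // tuple fields are numeric: the one case that still needs the bytes
            const i = std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10) catch
                break :blk false;
            break :blk i < ty.structFieldCount(mod);
        },
        else => false,
    };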
- const kv = try mod.getErrorValue(err_name); - const error_set_type = try mod.singleErrorSetType(kv.key); + const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(name); + const error_set_type = try mod.singleErrorSetTypeNts(name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = mod.intern_pool.getString(kv.key).unwrap().?, + .name = name, } })).toValue()); } @@ -15730,12 +15756,7 @@ fn zirThis( return sema.analyzeDeclVal(block, src, this_decl_index); } -fn zirClosureCapture( - sema: *Sema, - block: *Block, - inst: Zir.Inst.Index, -) CompileError!void { - // TODO: Compile error when closed over values are modified +fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_tok; // Closures are not necessarily constant values. For example, the // code might do something like this: @@ -15754,13 +15775,8 @@ fn zirClosureCapture( try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, capture); } -fn zirClosureGet( - sema: *Sema, - block: *Block, - inst: Zir.Inst.Index, -) CompileError!Air.Inst.Ref { +fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; - // TODO CLOSURE: Test this with inline functions const inst_data = sema.code.instructions.items(.data)[inst].inst_node; var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?; // Note: The target closure must be in this scope list. @@ -15896,7 +15912,7 @@ fn zirBuiltinSrc( const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = mem.span(fn_owner_decl.name); + const name = mod.intern_pool.stringToSlice(fn_owner_decl.name); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -15965,6 +15981,7 @@ fn zirBuiltinSrc( fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -15995,7 +16012,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Fn", + try ip.getOrPutString(gpa, "Fn"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); try sema.ensureDeclAnalyzed(fn_info_decl_index); @@ -16006,7 +16023,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, fn_info_ty.getNamespaceIndex(mod).unwrap().?, - "Param", + try ip.getOrPutString(gpa, "Param"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); try sema.ensureDeclAnalyzed(param_info_decl_index); @@ -16018,8 +16035,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = mod.typeToFunc(ty).?; const param_ty = info.param_types[i]; const is_generic = param_ty == .generic_poison_type; - const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{ - .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), + const param_ty_val = try ip.get(gpa, .{ .opt = .{ + .ty = try ip.get(gpa, .{ .opt_type = .type_type }), .val = if (is_generic) .none else param_ty, } }); @@ -16070,7 +16087,7 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = mod.typeToFunc(ty).?; const ret_ty_opt = try mod.intern(.{ .opt = .{ - .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), + .ty = try ip.get(gpa, .{ .opt_type = .type_type }), .val = if (info.return_type == .generic_poison_type) .none else info.return_type, } }); @@ -16104,7 +16121,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Int", + try ip.getOrPutString(gpa, "Int"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, int_info_decl_index); try sema.ensureDeclAnalyzed(int_info_decl_index); @@ -16133,7 +16150,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Float", + try ip.getOrPutString(gpa, "Float"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); try sema.ensureDeclAnalyzed(float_info_decl_index); @@ -16166,7 +16183,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, - "Pointer", + try ip.getOrPutString(gpa, "Pointer"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -16178,7 +16195,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, pointer_ty.getNamespaceIndex(mod).unwrap().?, - "Size", + try ip.getOrPutString(gpa, "Size"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -16219,7 +16236,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Array", + try ip.getOrPutString(gpa, "Array"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, array_field_ty_decl_index); try sema.ensureDeclAnalyzed(array_field_ty_decl_index); @@ -16251,7 +16268,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Vector", + try ip.getOrPutString(gpa, "Vector"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, vector_field_ty_decl_index); try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); @@ -16281,7 +16298,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Optional", + try ip.getOrPutString(gpa, "Optional"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, optional_field_ty_decl_index); try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); @@ -16312,7 +16329,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Error", + try ip.getOrPutString(gpa, "Error"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); try sema.ensureDeclAnalyzed(set_field_ty_decl_index); @@ -16332,7 +16349,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const names = ty.errorSetNames(mod); const vals = try sema.arena.alloc(InternPool.Index, names.len); for (vals, names) |*field_val, name_ip| { - const name = mod.intern_pool.stringToSlice(name_ip); + const name = ip.stringToSlice(name_ip); const name_val = v: { var anon_decl = try 
block.startAnonDecl(); defer anon_decl.deinit(); @@ -16415,7 +16432,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "ErrorUnion", + try ip.getOrPutString(gpa, "ErrorUnion"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index); try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); @@ -16440,7 +16457,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Enum => { // TODO: look into memoizing this result. - const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive); @@ -16452,7 +16469,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "EnumField", + try ip.getOrPutString(gpa, "EnumField"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); @@ -16462,8 +16479,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); for (enum_field_vals, 0..) |*field_val, i| { - const name_ip = mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names[i]; - const name = mod.intern_pool.stringToSlice(name_ip); + const name_ip = ip.indexToKey(ty.toIntern()).enum_type.names[i]; + const name = ip.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16532,7 +16549,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Enum", + try ip.getOrPutString(gpa, "Enum"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index); try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); @@ -16570,7 +16587,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Union", + try ip.getOrPutString(gpa, "Union"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index); try sema.ensureDeclAnalyzed(type_union_ty_decl_index); @@ -16583,7 +16600,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "UnionField", + try ip.getOrPutString(gpa, "UnionField"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); @@ -16601,7 +16618,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai for (union_field_vals, 0..) 
|*field_val, i| { const field = union_fields.values()[i]; - const name = union_fields.keys()[i]; + const name = ip.stringToSlice(union_fields.keys()[i]); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16682,7 +16699,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, - "ContainerLayout", + try ip.getOrPutString(gpa, "ContainerLayout"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -16721,7 +16738,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Struct", + try ip.getOrPutString(gpa, "Struct"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index); try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); @@ -16734,7 +16751,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "StructField", + try ip.getOrPutString(gpa, "StructField"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); @@ -16749,11 +16766,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); fv: { - const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + const struct_type = switch (ip.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |tuple| { struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); for (struct_field_vals, 0..) 
|*struct_field_val, i| { - const anon_struct_type = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type; + const anon_struct_type = ip.indexToKey(struct_ty.toIntern()).anon_struct_type; const field_ty = anon_struct_type.types[i]; const field_val = anon_struct_type.values[i]; const name_val = v: { @@ -16761,7 +16778,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = if (tuple.names.len != 0) // https://github.com/ziglang/zig/issues/15709 - @as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i])) + @as([]const u8, ip.stringToSlice(tuple.names[i])) else try std.fmt.allocPrint(sema.arena, "{d}", .{i}); const new_decl_ty = try mod.arrayType(.{ @@ -16815,7 +16832,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai struct_field_vals, struct_obj.fields.keys(), struct_obj.fields.values(), - ) |*field_val, name, field| { + ) |*field_val, name_nts, field| { + const name = ip.stringToSlice(name_nts); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16838,10 +16856,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; - const opt_default_val = if (field.default_val.toIntern() == .unreachable_value) + const opt_default_val = if (field.default_val == .none) null else - field.default_val; + field.default_val.toValue(); const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); const alignment = field.alignment(mod, layout); @@ -16908,7 +16926,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, - "ContainerLayout", + try ip.getOrPutString(gpa, "ContainerLayout"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -16945,7 +16963,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Opaque", + try ip.getOrPutString(gpa, "Opaque"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index); try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); @@ -16982,6 +17000,8 @@ fn typeInfoDecls( opt_namespace: Module.Namespace.OptionalIndex, ) CompileError!InternPool.Index { const mod = sema.mod; + const gpa = sema.gpa; + var decls_anon_decl = try block.startAnonDecl(); defer decls_anon_decl.deinit(); @@ -16990,7 +17010,7 @@ fn typeInfoDecls( block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Declaration", + try mod.intern_pool.getOrPutString(gpa, "Declaration"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); try sema.ensureDeclAnalyzed(declaration_ty_decl_index); @@ -16999,10 +17019,10 @@ fn typeInfoDecls( }; try sema.queueFullTypeResolution(declaration_ty); - var decl_vals = std.ArrayList(InternPool.Index).init(sema.gpa); + var decl_vals = std.ArrayList(InternPool.Index).init(gpa); defer decl_vals.deinit(); - var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa); + var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa); defer seen_namespaces.deinit(); if (opt_namespace.unwrap()) |namespace_index| { @@ -17061,7 +17081,7 @@ fn typeInfoNamespaceDecls( const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = mem.span(decl.name); + const name = 
mod.intern_pool.stringToSlice(decl.name); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -17696,15 +17716,14 @@ fn zirRetErrValue( ) CompileError!Zir.Inst.Index { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const err_name = inst_data.get(sema.code); + const err_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(err_name); const src = inst_data.src(); - // Return the error code from the function. - const kv = try mod.getErrorValue(err_name); - const error_set_type = try mod.singleErrorSetType(err_name); + const error_set_type = try mod.singleErrorSetTypeNts(err_name); const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + .name = err_name, } })).toValue()); return sema.analyzeRet(block, result_inst, src); } @@ -18177,7 +18196,7 @@ fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const init_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data; const union_ty = try sema.resolveType(block, ty_src, extra.union_type); - const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "name of field being initialized must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "name of field being initialized must be comptime-known"); const init = try sema.resolveInst(extra.init); return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src); } @@ -18189,7 +18208,7 @@ fn unionInit( init_src: LazySrcLoc, union_ty: Type, union_ty_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const mod = sema.mod; @@ -18257,7 +18276,7 @@ fn zirStructInit( const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); const field_index = if (resolved_ty.isTuple(mod)) try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src) else @@ -18298,7 +18317,7 @@ fn zirStructInit( const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); @@ -18347,12 +18366,12 @@ fn finishStructInit( is_ref: bool, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - const gpa = sema.gpa; + const ip = &mod.intern_pool; var root_msg: 
?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + switch (ip.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct| { for (anon_struct.types, anon_struct.values, 0..) |field_ty, default_val, i| { if (field_inits[i] != .none) continue; @@ -18366,9 +18385,9 @@ fn finishStructInit( root_msg = try sema.errMsg(block, init_src, template, .{i}); } } else { - const field_name = mod.intern_pool.stringToSlice(anon_struct.names[i]); + const field_name = anon_struct.names[i]; const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -18385,17 +18404,17 @@ fn finishStructInit( for (struct_obj.fields.values(), 0..) |field, i| { if (field_inits[i] != .none) continue; - if (field.default_val.toIntern() == .unreachable_value) { + if (field.default_val == .none) { const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { root_msg = try sema.errMsg(block, init_src, template, args); } } else { - field_inits[i] = try sema.addConstant(field.ty, field.default_val); + field_inits[i] = try sema.addConstant(field.ty, field.default_val.toValue()); } } }, @@ -18404,10 +18423,9 @@ fn finishStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); - try sema.mod.errNoteNonLazy( - struct_obj.srcLoc(sema.mod), + const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); + try mod.errNoteNonLazy( + struct_obj.srcLoc(mod), msg, "struct '{s}' declared here", .{fqn}, @@ -18826,11 +18844,13 @@ fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const ty_src = inst_data.src(); const field_src = inst_data.src(); const aggregate_ty = try sema.resolveType(block, ty_src, extra.container_type); - const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "field name must be comptime-known"); return sema.fieldType(block, aggregate_ty, field_name, field_src, ty_src); } fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const ty_src = inst_data.src(); @@ -18843,7 +18863,8 @@ fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A error.GenericPoison => return Air.Inst.Ref.generic_poison_type, else => |e| return e, }; - const field_name = sema.code.nullTerminatedString(extra.name_start); + const zir_field_name = sema.code.nullTerminatedString(extra.name_start); + const field_name = try ip.getOrPutString(sema.gpa, zir_field_name); return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src); } @@ -18851,7 +18872,7 @@ fn fieldType( sema: *Sema, block: *Block, aggregate_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ty_src: LazySrcLoc, 
) CompileError!Air.Inst.Ref { @@ -19050,13 +19071,14 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const mod = sema.mod; + const ip = &mod.intern_pool; try sema.resolveTypeLayout(operand_ty); const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const tag_name = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; - const bytes = mod.intern_pool.stringToSlice(tag_name); + const tag_name = ip.indexToKey(val.toIntern()).enum_literal; + const bytes = ip.stringToSlice(tag_name); return sema.addStrLit(block, bytes); }, .Enum => operand_ty, @@ -19089,7 +19111,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const enum_decl = mod.declPtr(enum_decl_index); const msg = msg: { const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{s}'", .{ - val.fmtValue(enum_ty, sema.mod), enum_decl.name, + val.fmtValue(enum_ty, sema.mod), ip.stringToSlice(enum_decl.name), }); errdefer msg.destroy(sema.gpa); try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{}); @@ -19098,7 +19120,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithOwnedErrorMsg(msg); }; const field_name = enum_ty.enumFieldName(field_index, mod); - return sema.addStrLit(block, field_name); + return sema.addStrLit(block, ip.stringToSlice(field_name)); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) { @@ -19119,6 +19141,7 @@ fn zirReify( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -19127,11 +19150,10 @@ fn zirReify( const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstValue(block, operand_src, type_info, "operand to @Type must be comptime-known"); - const union_val = mod.intern_pool.indexToKey(val.toIntern()).un; + const union_val = ip.indexToKey(val.toIntern()).un; const target = mod.getTarget(); if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src); const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?; - const ip = &mod.intern_pool; switch (@intToEnum(std.builtin.TypeId, tag_index)) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, @@ -19145,8 +19167,14 @@ fn zirReify( .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const signedness_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("signedness").?); - const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("bits").?); + const signedness_val = try union_val.val.toValue().fieldValue( + mod, + fields.getIndex(try ip.getOrPutString(gpa, "signedness")).?, + ); + const bits_val = try union_val.val.toValue().fieldValue( + mod, + fields.getIndex(try ip.getOrPutString(gpa, "bits")).?, + ); const signedness = mod.toEnum(std.builtin.Signedness, 
signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -19155,8 +19183,12 @@ fn zirReify( }, .Vector => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("len").?); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "len"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); @@ -19171,7 +19203,9 @@ fn zirReify( }, .Float => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("bits").?); + const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "bits"), + ).?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { @@ -19186,14 +19220,30 @@ fn zirReify( }, .Pointer => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("size").?); - const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_const").?); - const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_volatile").?); - const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("alignment").?); - const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("address_space").?); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); - const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_allowzero").?); - const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("sentinel").?); + const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "size"), + ).?); + const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_const"), + ).?); + const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_volatile"), + ).?); + const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); + const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "address_space"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); + const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_allowzero"), + ).?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "sentinel"), + ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19279,9 +19329,15 @@ fn zirReify( }, .Array => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("len").?); - const child_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); - const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("sentinel").?); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "len"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "sentinel"), + ).?); const len = len_val.toUnsignedInt(mod); const child_ty = child_val.toType(); @@ -19298,7 +19354,9 @@ fn zirReify( }, .Optional => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); const child_ty = child_val.toType(); @@ -19307,8 +19365,12 @@ fn zirReify( }, .ErrorUnion => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("error_set").?); - const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("payload").?); + const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "error_set"), + ).?); + const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "payload"), + ).?); const error_set_ty = error_set_val.toType(); const payload_ty = payload_val.toType(); @@ -19330,14 +19392,17 @@ fn zirReify( for (0..len) |i| { const elem_val = try payload_val.elemValue(mod, i); const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); - const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); - const kv = try mod.getErrorValue(name_str); - const name_ip = try mod.intern_pool.getOrPutString(gpa, kv.key); - const gop = names.getOrPutAssumeCapacity(name_ip); + const name = try name_val.toIpString(Type.slice_const_u8, mod); + _ = try mod.getErrorValue(name); + const gop = names.getOrPutAssumeCapacity(name); if (gop.found_existing) { - return sema.fail(block, src, "duplicate error '{s}'", .{name_str}); + return sema.fail(block, src, "duplicate error '{s}'", .{ + ip.stringToSlice(name), + }); } } @@ -19346,11 +19411,21 @@ fn zirReify( }, .Struct => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("layout").?); - const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("backing_integer").?); - const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); - const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_tuple").?); + const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "layout"), + ).?); + const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, 
"backing_integer"), + ).?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "fields"), + ).?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); + const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_tuple"), + ).?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -19367,10 +19442,18 @@ fn zirReify( }, .Enum => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("tag_type").?); - const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); - const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_exhaustive").?); + const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "tag_type"), + ).?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "fields"), + ).?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); + const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_exhaustive"), + ).?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19396,7 +19479,7 @@ fn zirReify( // Define our empty enum decl const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod))); - const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{ + const incomplete_enum = try ip.getIncompleteEnum(gpa, .{ .decl = new_decl_index, .namespace = .none, .fields_len = fields_len, @@ -19407,35 +19490,36 @@ fn zirReify( .explicit, .tag_ty = int_tag_ty.toIntern(), }); - errdefer mod.intern_pool.remove(incomplete_enum.index); + errdefer ip.remove(incomplete_enum.index); new_decl.val = incomplete_enum.index.toValue(); for (0..fields_len) |field_i| { const elem_val = try fields_val.elemValue(mod, field_i); const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); - const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex("value").?); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); + const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "value"), + ).?); - const field_name = try name_val.toAllocatedBytes( - Type.slice_const_u8, - sema.arena, - mod, - ); - const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + const field_name = try name_val.toIpString(Type.slice_const_u8, mod); if (!try sema.intFitsInType(value_val, int_tag_ty, null)) { // TODO: better source location return sema.fail(block, src, "field '{s}' with enumeration value '{}' is too large for backing int type '{}'", .{ - field_name, + ip.stringToSlice(field_name), value_val.fmtValue(Type.comptime_int, mod), int_tag_ty.fmt(mod), }); } - if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name_ip)) |other_index| { + if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| { const msg = msg: { - const msg = try sema.errMsg(block, 
src, "duplicate enum field '{s}'", .{field_name}); + const msg = try sema.errMsg(block, src, "duplicate enum field '{s}'", .{ + ip.stringToSlice(field_name), + }); errdefer msg.destroy(gpa); _ = other_index; // TODO: this note is incorrect try sema.errNote(block, src, msg, "other field here", .{}); @@ -19444,7 +19528,7 @@ fn zirReify( return sema.failWithOwnedErrorMsg(msg); } - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| { + if (try incomplete_enum.addFieldValue(ip, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| { const msg = msg: { const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)}); errdefer msg.destroy(gpa); @@ -19462,7 +19546,9 @@ fn zirReify( }, .Opaque => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19496,22 +19582,29 @@ fn zirReify( .decl = new_decl_index, .namespace = new_namespace_index, } }); - errdefer mod.intern_pool.remove(opaque_ty); + errdefer ip.remove(opaque_ty); new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; }, .Union => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("layout").?); - const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("tag_type").?); - const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); + const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "layout"), + ).?); + const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "tag_type"), + ).?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "fields"), + ).?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19555,7 +19648,7 @@ fn zirReify( const union_obj = mod.unionPtr(union_index); errdefer mod.destroyUnion(union_index); - const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{ + const union_ty = try ip.get(gpa, .{ .union_type = .{ .index = union_index, .runtime_tag = if (!tag_type_val.isNull(mod)) .tagged @@ -19566,7 +19659,7 @@ fn zirReify( .ReleaseFast, .ReleaseSmall => .none, }, } }); - errdefer mod.intern_pool.remove(union_ty); + errdefer ip.remove(union_ty); new_decl.val = union_ty.toValue(); new_namespace.ty = union_ty.toType(); @@ -19579,7 +19672,7 @@ fn zirReify( if (tag_type_val.optionalValue(mod)) |payload_val| { union_obj.tag_ty = payload_val.toType(); - const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.toIntern())) { + const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) { .enum_type => |x| x, else => return sema.fail(block, src, 
"Type.Union.tag_type must be an enum type", .{}), }; @@ -19597,26 +19690,26 @@ fn zirReify( for (0..fields_len) |i| { const elem_val = try fields_val.elemValue(mod, i); const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); - const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); - const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex("alignment").?); - - const field_name = try name_val.toAllocatedBytes( - Type.slice_const_u8, - new_decl_arena_allocator, - mod, - ); - - const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); + const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "type"), + ).?); + const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); + + const field_name = try name_val.toIpString(Type.slice_const_u8, mod); if (enum_field_names.len != 0) { - enum_field_names[i] = field_name_ip; + enum_field_names[i] = field_name; } if (explicit_enum_info) |tag_info| { - const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse { + const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { - const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) }); + const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ ip.stringToSlice(field_name), union_obj.tag_ty.fmt(mod) }); errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; @@ -19632,7 +19725,7 @@ fn zirReify( const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { // TODO: better source location - return sema.fail(block, src, "duplicate union field {s}", .{field_name}); + return sema.fail(block, src, "duplicate union field {s}", .{ip.stringToSlice(field_name)}); } const field_ty = type_val.toType(); @@ -19688,7 +19781,7 @@ fn zirReify( for (tag_info.names, 0..) 
|field_name, field_index| { if (explicit_tags_seen[field_index]) continue; try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ - mod.intern_pool.stringToSlice(field_name), + ip.stringToSlice(field_name), }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -19700,19 +19793,30 @@ fn zirReify( union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, null); } - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; }, .Fn => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("calling_convention").?); - const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("alignment").?); - const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_generic").?); - const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_var_args").?); - const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("return_type").?); - const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("params").?); + const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "calling_convention"), + ).?); + const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); + const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_generic"), + ).?); + const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_var_args"), + ).?); + const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "return_type"), + ).?); + const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "params"), + ).?); const is_generic = is_generic_val.toBool(); if (is_generic) { @@ -19746,9 +19850,15 @@ fn zirReify( for (param_types, 0..) 
|*param_type, i| { const elem_val = try params_val.elemValue(mod, i); const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_generic").?); - const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_noalias").?); - const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); + const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "is_generic"), + ).?); + const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "is_noalias"), + ).?); + const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "type"), + ).?); if (param_is_generic_val.toBool()) { return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{}); @@ -19801,6 +19911,7 @@ fn reifyStruct( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); @@ -19839,11 +19950,11 @@ fn reifyStruct( const struct_obj = mod.structPtr(struct_index); errdefer mod.destroyStruct(struct_index); - const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + const struct_ty = try ip.get(gpa, .{ .struct_type = .{ .index = struct_index.toOptional(), .namespace = new_namespace_index.toOptional(), } }); - errdefer mod.intern_pool.remove(struct_ty); + errdefer ip.remove(struct_ty); new_decl.val = struct_ty.toValue(); new_namespace.ty = struct_ty.toType(); @@ -19854,12 +19965,22 @@ fn reifyStruct( var i: usize = 0; while (i < fields_len) : (i += 1) { const elem_val = try fields_val.elemValue(mod, i); - const elem_fields = mod.intern_pool.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); - const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); - const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex("default_value").?); - const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_comptime").?); - const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex("alignment").?); + const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); + const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "type"), + ).?); + const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "default_value"), + ).?); + const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "is_comptime"), + ).?); + const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19874,19 +19995,15 @@ fn reifyStruct( return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}); } - const field_name = try name_val.toAllocatedBytes( - Type.slice_const_u8, - new_decl_arena_allocator, - mod, - ); + const field_name = try name_val.toIpString(Type.slice_const_u8, mod); if (is_tuple) { - const 
field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch { + const field_index = std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10) catch { return sema.fail( block, src, "tuple cannot have non-numeric field '{s}'", - .{field_name}, + .{ip.stringToSlice(field_name)}, ); }; @@ -19902,16 +20019,16 @@ fn reifyStruct( const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { // TODO: better source location - return sema.fail(block, src, "duplicate struct field {s}", .{field_name}); + return sema.fail(block, src, "duplicate struct field {s}", .{ip.stringToSlice(field_name)}); } const field_ty = type_val.toType(); const default_val = if (default_value_val.optionalValue(mod)) |opt_val| - try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse - return sema.failWithNeededComptime(block, src, "struct field default value must be comptime-known") + (try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse + return sema.failWithNeededComptime(block, src, "struct field default value must be comptime-known")).toIntern() else - Value.@"unreachable"; - if (is_comptime_val.toBool() and default_val.toIntern() == .unreachable_value) { + .none; + if (is_comptime_val.toBool() and default_val == .none) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } @@ -20000,7 +20117,6 @@ fn reifyStruct( struct_obj.status = .have_layout; } - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; @@ -20871,7 +20987,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty = try sema.resolveType(block, lhs_src, extra.lhs); - const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "name of field must be comptime-known"); const mod = sema.mod; try sema.resolveTypeLayout(ty); @@ -20889,7 +21005,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 } const field_index = if (ty.isTuple(mod)) blk: { - if (mem.eql(u8, field_name, "len")) { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { return sema.fail(block, src, "no offset available for 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src); @@ -21351,6 +21467,8 @@ fn resolveExportOptions( zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExportOptions { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const export_options_ty = try sema.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -21360,16 +21478,16 @@ fn resolveExportOptions( const section_src = sema.maybeOptionsSrc(block, src, "section"); const visibility_src = sema.maybeOptionsSrc(block, src, "visibility"); - const name_operand = try sema.fieldVal(block, src, options, "name", name_src); + const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); const name_ty = Type.slice_const_u8; const name = try 
name_val.toAllocatedBytes(name_ty, sema.arena, mod); - const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); + const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known"); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const section_operand = try sema.fieldVal(block, src, options, "section", section_src); + const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section"), section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); const section_ty = Type.slice_const_u8; const section = if (section_opt_val.optionalValue(mod)) |section_val| @@ -21377,7 +21495,7 @@ fn resolveExportOptions( else null; - const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src); + const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility"), visibility_src); const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known"); const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val); @@ -22217,10 +22335,11 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type); - const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, name_src, extra.field_name, "field name must be comptime-known"); const field_ptr = try sema.resolveInst(extra.field_ptr); const field_ptr_ty = sema.typeOf(field_ptr); const mod = sema.mod; + const ip = &mod.intern_pool; if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) { return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)}); @@ -22230,7 +22349,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const field_index = switch (parent_ty.zigTypeTag(mod)) { .Struct => blk: { if (parent_ty.isTuple(mod)) { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src); @@ -22276,7 +22395,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { - const field = switch (mod.intern_pool.indexToKey(field_ptr_val.toIntern())) { + const field = switch (ip.indexToKey(field_ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .field => |field| field, else => null, @@ -22291,7 +22410,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr src, "field '{s}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{ - field_name, + ip.stringToSlice(field_name), field_index, field.index, parent_ty.fmt(sema.mod), @@ -22807,6 +22926,8 @@ 
fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -22824,7 +22945,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_elem_ty = dest_ptr_ty.elemType2(mod); const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: { - const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src); + const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len"), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; @@ -23068,11 +23189,11 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk FuncLinkSection{ .generic = {} }; } - break :blk FuncLinkSection{ .explicit = try val.toAllocatedBytes(ty, sema.arena, sema.mod) }; + break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) }; } else if (extra.data.bits.has_section_ref) blk: { const section_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; - const section_name = sema.resolveConstString(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) { + const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) { error.GenericPoison => { break :blk FuncLinkSection{ .generic = {} }; }, @@ -23272,6 +23393,8 @@ fn resolvePrefetchOptions( zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.PrefetchOptions { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const options_ty = try sema.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); @@ -23279,13 +23402,13 @@ fn resolvePrefetchOptions( const locality_src = sema.maybeOptionsSrc(block, src, "locality"); const cache_src = sema.maybeOptionsSrc(block, src, "cache"); - const rw = try sema.fieldVal(block, src, options, "rw", rw_src); + const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw"), rw_src); const rw_val = try sema.resolveConstValue(block, rw_src, rw, "prefetch read/write must be comptime-known"); - const locality = try sema.fieldVal(block, src, options, "locality", locality_src); + const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality"), locality_src); const locality_val = try sema.resolveConstValue(block, locality_src, locality, "prefetch locality must be comptime-known"); - const cache = try sema.fieldVal(block, src, options, "cache", cache_src); + const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache"), cache_src); const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known"); return std.builtin.PrefetchOptions{ @@ -23336,6 +23459,8 @@ fn resolveExternOptions( zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExternOptions { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const options_inst = 
try sema.resolveInst(zir_ref); const extern_options_ty = try sema.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); @@ -23345,18 +23470,18 @@ fn resolveExternOptions( const linkage_src = sema.maybeOptionsSrc(block, src, "linkage"); const thread_local_src = sema.maybeOptionsSrc(block, src, "thread_local"); - const name_ref = try sema.fieldVal(block, src, options, "name", name_src); + const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src); const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known"); const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); - const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src); + const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name"), library_src); const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known"); - const linkage_ref = try sema.fieldVal(block, src, options, "linkage", linkage_src); + const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known"); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); + const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local"), thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: { @@ -23425,7 +23550,7 @@ fn zirBuiltinExtern( const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); errdefer mod.destroyDecl(new_decl_index); const new_decl = mod.declPtr(new_decl_index); - new_decl.name = try sema.gpa.dupeZ(u8, options.name); + new_decl.name = try mod.intern_pool.getOrPutString(sema.gpa, options.name); { const new_var = try mod.intern(.{ .variable = .{ @@ -23444,7 +23569,7 @@ fn zirBuiltinExtern( new_decl.ty = ty; new_decl.val = new_var.toValue(); new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; @@ -24265,12 +24390,13 @@ fn safetyPanic( panic_id: PanicId, ) CompileError!void { const mod = sema.mod; + const gpa = sema.gpa; const panic_messages_ty = try sema.getBuiltinType("panic_messages"); const msg_decl_index = (try sema.namespaceLookup( block, sema.src, panic_messages_ty.getNamespaceIndex(mod).unwrap().?, - @tagName(panic_id), + try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)), )).?; const msg_inst = try sema.analyzeDeclVal(block, sema.src, msg_decl_index); @@ -24302,14 +24428,13 @@ fn fieldVal( block: *Block, src: LazySrcLoc, object: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. 
This function takes a value and returns a value. const mod = sema.mod; - const gpa = sema.gpa; const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -24326,12 +24451,12 @@ fn fieldVal( switch (inner_ty.zigTypeTag(mod)) { .Array => { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.addConstant( Type.usize, try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), ); - } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { + } else if (ip.stringEqlSlice(field_name, "ptr") and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = ptr_info.pointee_type.childType(mod), @@ -24352,20 +24477,20 @@ fn fieldVal( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(mod) }, + .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, ); } }, .Pointer => { const ptr_info = inner_ty.ptrInfo(mod); if (ptr_info.size == .Slice) { - if (mem.eql(u8, field_name, "ptr")) { + if (ip.stringEqlSlice(field_name, "ptr")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; return sema.analyzeSlicePtr(block, object_src, slice, inner_ty); - } else if (mem.eql(u8, field_name, "len")) { + } else if (ip.stringEqlSlice(field_name, "len")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else @@ -24376,7 +24501,7 @@ fn fieldVal( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(mod) }, + .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, ); } } @@ -24392,13 +24517,12 @@ fn fieldVal( switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { - const name = try ip.getOrPutString(gpa, field_name); switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { - if (error_set_type.nameIndex(ip, name) != null) break :blk; + if (error_set_type.nameIndex(ip, field_name) != null) break :blk; const msg = msg: { const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(mod), + ip.stringToSlice(field_name), child_type.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, child_type); @@ -24419,10 +24543,10 @@ fn fieldVal( const error_set_type = if (!child_type.isAnyError(mod)) child_type else - try mod.singleErrorSetTypeNts(name); + try mod.singleErrorSetTypeNts(field_name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = name, + .name = field_name, } })).toValue()); }, .Union => { @@ -24499,7 +24623,7 @@ fn fieldPtr( block: *Block, src: LazySrcLoc, object_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, initializing: bool, ) CompileError!Air.Inst.Ref { @@ -24507,7 +24631,6 @@ fn fieldPtr( // in `fieldVal`. This function takes a pointer and returns a pointer. 
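The fieldVal/fieldPtr hunks around this point show the core pattern of the transition: field names arrive as InternPool.NullTerminatedString ids rather than []const u8, so hot paths compare with ip.stringEqlSlice and only diagnostic paths pay for ip.stringToSlice. A self-contained sketch of the deduplicating-pool idea behind those calls; StringPool and Id are illustrative names, not the compiler's API (the real pool is src/InternPool.zig, which also avoids this sketch's duplicated map keys):

const std = @import("std");

// A deduplicating string pool in miniature.
const StringPool = struct {
    bytes: std.ArrayListUnmanaged(u8) = .{},
    map: std.StringHashMapUnmanaged(u32) = .{},

    const Id = enum(u32) { _ };

    fn deinit(pool: *StringPool, gpa: std.mem.Allocator) void {
        var it = pool.map.keyIterator();
        while (it.next()) |key| gpa.free(key.*);
        pool.map.deinit(gpa);
        pool.bytes.deinit(gpa);
    }

    // Like getOrPutString: copy the bytes in once, always hand back the
    // same integer id for the same contents.
    fn getOrPut(pool: *StringPool, gpa: std.mem.Allocator, s: []const u8) !Id {
        if (pool.map.get(s)) |offset| return @intToEnum(Id, offset);
        const offset = @intCast(u32, pool.bytes.items.len);
        try pool.bytes.appendSlice(gpa, s);
        try pool.bytes.append(gpa, 0); // null terminator, as in NullTerminatedString
        try pool.map.put(gpa, try gpa.dupe(u8, s), offset);
        return @intToEnum(Id, offset);
    }

    // Like stringToSlice: recover the bytes, e.g. for error messages.
    fn toSlice(pool: *const StringPool, id: Id) []const u8 {
        const start = @enumToInt(id);
        const end = std.mem.indexOfScalarPos(u8, pool.bytes.items, start, 0).?;
        return pool.bytes.items[start..end];
    }

    // Like stringEqlSlice: compare an id against a literal.
    fn eqlSlice(pool: *const StringPool, id: Id, s: []const u8) bool {
        return std.mem.eql(u8, pool.toSlice(id), s);
    }
};

test "same contents, same id" {
    const gpa = std.testing.allocator;
    var pool = StringPool{};
    defer pool.deinit(gpa);
    const a = try pool.getOrPut(gpa, "len");
    const b = try pool.getOrPut(gpa, "len");
    try std.testing.expect(a == b);
    try std.testing.expect(pool.eqlSlice(a, "len"));
    try std.testing.expect(!pool.eqlSlice(try pool.getOrPut(gpa, "ptr"), "len"));
}
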
const mod = sema.mod; - const gpa = sema.gpa; const ip = &mod.intern_pool; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); @@ -24528,7 +24651,7 @@ fn fieldPtr( switch (inner_ty.zigTypeTag(mod)) { .Array => { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( @@ -24541,7 +24664,7 @@ fn fieldPtr( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(mod) }, + .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, ); } }, @@ -24553,7 +24676,7 @@ fn fieldPtr( const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty; - if (mem.eql(u8, field_name, "ptr")) { + if (ip.stringEqlSlice(field_name, "ptr")) { const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); const result_ty = try Type.ptr(sema.arena, mod, .{ @@ -24575,7 +24698,7 @@ fn fieldPtr( try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); - } else if (mem.eql(u8, field_name, "len")) { + } else if (ip.stringEqlSlice(field_name, "len")) { const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = Type.usize, .mutable = attr_ptr_ty.ptrIsMutable(mod), @@ -24600,7 +24723,7 @@ fn fieldPtr( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(mod) }, + .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, ); } }, @@ -24617,14 +24740,13 @@ fn fieldPtr( switch (child_type.zigTypeTag(mod)) { .ErrorSet => { - const name = try ip.getOrPutString(gpa, field_name); switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { - if (error_set_type.nameIndex(ip, name) != null) { + if (error_set_type.nameIndex(ip, field_name) != null) { break :blk; } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(mod), + ip.stringToSlice(field_name), child_type.fmt(mod), }); }, .inferred_error_set_type => { @@ -24642,12 +24764,12 @@ fn fieldPtr( const error_set_type = if (!child_type.isAnyError(mod)) child_type else - try mod.singleErrorSetTypeNts(name); + try mod.singleErrorSetTypeNts(field_name); return sema.analyzeDeclRef(try anon_decl.finish( error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = name, + .name = field_name, } })).toValue(), 0, // default alignment )); @@ -24736,13 +24858,14 @@ fn fieldCallBind( block: *Block, src: LazySrcLoc, raw_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!ResolvedFieldCallee { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. 
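In the fieldCallBind hunk below, the per-field `mem.eql(u8, struct_ty.structFieldName(i, mod), field_name)` scan becomes `field_name == struct_ty.structFieldName(i, mod)`: once both sides are interned ids, name equality is a single integer comparison. A minimal sketch of that lookup shape, where NameId is a stand-in for InternPool.NullTerminatedString:

const std = @import("std");

const NameId = enum(u32) { _ };

// "Which field has this name?" is now a scan of integer compares,
// with no byte-by-byte std.mem.eql per candidate field.
fn fieldIndex(field_names: []const NameId, wanted: NameId) ?u32 {
    for (field_names, 0..) |name, i| {
        if (name == wanted) return @intCast(u32, i);
    }
    return null;
}

test "lookup by id, not by bytes" {
    const names = [_]NameId{ @intToEnum(NameId, 7), @intToEnum(NameId, 3) };
    try std.testing.expectEqual(@as(?u32, 1), fieldIndex(&names, @intToEnum(NameId, 3)));
    try std.testing.expectEqual(@as(?u32, null), fieldIndex(&names, @intToEnum(NameId, 9)));
}
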
const mod = sema.mod; + const ip = &mod.intern_pool; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) @@ -24771,18 +24894,18 @@ fn fieldCallBind( return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); } else if (struct_ty.isTuple(mod)) { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) }; } - if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { + if (std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10)) |field_index| { if (field_index >= struct_ty.structFieldCount(mod)) break :find_field; return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr); } else |_| {} } else { const max = struct_ty.structFieldCount(mod); - var i: u32 = 0; - while (i < max) : (i += 1) { - if (mem.eql(u8, struct_ty.structFieldName(i, mod), field_name)) { + for (0..max) |i_usize| { + const i = @intCast(u32, i_usize); + if (field_name == struct_ty.structFieldName(i, mod)) { return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr); } } @@ -24876,12 +24999,12 @@ fn fieldCallBind( }; const msg = msg: { - const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(mod) }); + const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ ip.stringToSlice(field_name), concrete_ty.fmt(mod) }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); if (found_decl) |decl_idx| { const decl = mod.declPtr(decl_idx); - try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{s}' is not a member function", .{field_name}); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{s}' is not a member function", .{ip.stringToSlice(field_name)}); } break :msg msg; }; @@ -24933,7 +25056,7 @@ fn namespaceLookup( block: *Block, src: LazySrcLoc, namespace: Namespace.Index, - decl_name: []const u8, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Decl.Index { const mod = sema.mod; const gpa = sema.gpa; @@ -24942,7 +25065,7 @@ fn namespaceLookup( if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ - decl_name, + mod.intern_pool.stringToSlice(decl_name), }); errdefer msg.destroy(gpa); try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{}); @@ -24960,7 +25083,7 @@ fn namespaceLookupRef( block: *Block, src: LazySrcLoc, namespace: Namespace.Index, - decl_name: []const u8, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; try sema.addReferencedBy(block, src, decl); @@ -24972,7 +25095,7 @@ fn namespaceLookupVal( block: *Block, src: LazySrcLoc, namespace: Namespace.Index, - decl_name: []const u8, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; return try sema.analyzeDeclVal(block, src, decl); @@ -24983,7 +25106,7 @@ fn structFieldPtr( block: *Block, src: LazySrcLoc, struct_ptr: Air.Inst.Ref, - field_name: []const 
u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, initializing: bool, @@ -24995,7 +25118,7 @@ fn structFieldPtr( try sema.resolveStructLayout(struct_ty); if (struct_ty.isTuple(mod)) { - if (mem.eql(u8, field_name, "len")) { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } @@ -25101,7 +25224,7 @@ fn structFieldPtrByIndex( if (field.is_comptime) { const val = try mod.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), - .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) }, + .addr = .{ .comptime_field = field.default_val }, } }); return sema.addConstant(ptr_field_ty, val.toValue()); } @@ -25126,7 +25249,7 @@ fn structFieldVal( block: *Block, src: LazySrcLoc, struct_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { @@ -25145,7 +25268,7 @@ fn structFieldVal( const field = struct_obj.fields.values()[field_index]; if (field.is_comptime) { - return sema.addConstant(field.ty, field.default_val); + return sema.addConstant(field.ty, field.default_val.toValue()); } if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { @@ -25176,12 +25299,12 @@ fn tupleFieldVal( block: *Block, src: LazySrcLoc, tuple_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, tuple_ty: Type, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - if (mem.eql(u8, field_name, "len")) { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount(mod)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); @@ -25193,11 +25316,12 @@ fn tupleFieldIndex( sema: *Sema, block: *Block, tuple_ty: Type, - field_name: []const u8, + field_name_ip: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!u32 { const mod = sema.mod; - assert(!mem.eql(u8, field_name, "len")); + const field_name = mod.intern_pool.stringToSlice(field_name_ip); + assert(!std.mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { if (field_index < tuple_ty.structFieldCount(mod)) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ @@ -25253,13 +25377,14 @@ fn unionFieldPtr( block: *Block, src: LazySrcLoc, union_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_union_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { const arena = sema.arena; const mod = sema.mod; + const ip = &mod.intern_pool; assert(unresolved_union_ty.zigTypeTag(mod) == .Union); @@ -25281,7 +25406,9 @@ fn unionFieldPtr( const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ + ip.stringToSlice(field_name), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -25296,14 +25423,17 @@ fn unionFieldPtr( if (union_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); } - const un = 
mod.intern_pool.indexToKey(union_val.toIntern()).un; + const un = ip.indexToKey(union_val.toIntern()).un; const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); if (!tag_matches) { const msg = msg: { const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); - const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); + const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ + ip.stringToSlice(field_name), + ip.stringToSlice(active_field_name), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -25345,11 +25475,12 @@ fn unionFieldVal( block: *Block, src: LazySrcLoc, union_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); @@ -25361,7 +25492,7 @@ fn unionFieldVal( if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); - const un = mod.intern_pool.indexToKey(union_val.toIntern()).un; + const un = ip.indexToKey(union_val.toIntern()).un; const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); switch (union_obj.layout) { @@ -25372,7 +25503,9 @@ fn unionFieldVal( const msg = msg: { const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); - const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); + const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ + ip.stringToSlice(field_name), ip.stringToSlice(active_field_name), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -26470,14 +26603,13 @@ fn coerceExtra( // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; - const bytes = mod.intern_pool.stringToSlice(string); - const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { + const field_index = dest_ty.enumFieldIndex(string, mod) orelse { const msg = msg: { const msg = try sema.errMsg( block, inst_src, "no field named '{s}' in enum '{}'", - .{ bytes, dest_ty.fmt(mod) }, + .{ mod.intern_pool.stringToSlice(string), dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -27876,10 +28008,7 @@ fn storePtrVal( error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - const arena = mut_kit.beginArena(mod); - defer mut_kit.finishArena(mod); - - reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, arena)).intern(mut_kit.ty, mod)).toValue(); + reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, sema.arena)).intern(mut_kit.ty, mod)).toValue(); }, 
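The deleted beginArena/finishArena pair above, and the removal of ComptimePtrMutationKit's decl_arena just below, drop the per-Decl value arenas; temporaries during comptime pointer mutation now come from sema.arena instead. The trade is the usual arena one — many allocations, one bulk free at the end of the analysis — sketched here using nothing beyond std:

const std = @import("std");

test "arena: many allocations, one free" {
    var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_state.deinit(); // frees every allocation below at once
    const arena = arena_state.allocator();

    // Temporaries created while analyzing a declaration can all share
    // the arena's lifetime; no individual free() calls are needed.
    const scratch_ints = try arena.alloc(u32, 16);
    const scratch_bytes = try arena.dupe(u8, "scratch");
    _ = scratch_ints;
    _ = scratch_bytes;
}
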
.bad_decl_ty, .bad_ptr_ty => { // TODO show the decl declaration site in a note and explain whether the decl @@ -27913,18 +28042,6 @@ const ComptimePtrMutationKit = struct { bad_ptr_ty, }, ty: Type, - decl_arena: std.heap.ArenaAllocator = undefined, - - fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.mut_decl.decl); - return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); - } - - fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.mut_decl.decl); - decl.value_arena.?.release(&self.decl_arena); - self.decl_arena = undefined; - } }; fn beginComptimePtrMutation( @@ -27966,10 +28083,8 @@ fn beginComptimePtrMutation( // An error union has been initialized to undefined at comptime and now we // are for the first time setting the payload. We must change the // representation of the error union from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - const payload = try arena.create(Value.Payload.SubValue); + const payload = try sema.arena.create(Value.Payload.SubValue); payload.* = .{ .base = .{ .tag = .eu_payload }, .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), @@ -28019,10 +28134,8 @@ fn beginComptimePtrMutation( // An optional has been initialized to undefined at comptime and now we // are for the first time setting the payload. We must change the // representation of the optional from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - const payload = try arena.create(Value.Payload.SubValue); + const payload = try sema.arena.create(Value.Payload.SubValue); payload.* = .{ .base = .{ .tag = .opt_payload }, .data = payload_val.toValue(), @@ -28088,8 +28201,7 @@ fn beginComptimePtrMutation( // If we wanted to avoid this, there would need to be special detection // elsewhere to identify when writing a value to an array element that is stored // using the `bytes` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; const bytes = val_ptr.castTag(.bytes).?.data; const dest_len = parent.ty.arrayLenIncludingSentinel(mod); @@ -28121,8 +28233,7 @@ fn beginComptimePtrMutation( // need to be special detection elsewhere to identify when writing a value to an // array element that is stored using the `repeated` tag, and handle it // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); const array_len_including_sentinel = @@ -28163,8 +28274,7 @@ fn beginComptimePtrMutation( // An array has been initialized to undefined at comptime and now we // are for the first time setting an element. We must change the representation // of the array from `undef` to `array`. 
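The comment above describes the representation switch this whole region implements: a comptime value stays `undef` until the first write forces a concrete aggregate. Independent of Sema's machinery, the shape of that promotion looks like the following; ArrayValue and setElem are illustrative names only, not compiler API:

const std = @import("std");

// A comptime-known array value is either still fully undefined, or an
// explicit list of element values.
const ArrayValue = union(enum) {
    undef: usize, // length; no elements materialized yet
    elems: []i64,

    // First write promotes `undef` to `elems`. Untouched slots are zeroed
    // here for simplicity; the compiler keeps them as undef values.
    // Assumes `index` is in bounds.
    fn setElem(self: *ArrayValue, arena: std.mem.Allocator, index: usize, value: i64) !void {
        switch (self.*) {
            .undef => |len| {
                const elems = try arena.alloc(i64, len);
                @memset(elems, 0);
                elems[index] = value;
                self.* = .{ .elems = elems };
            },
            .elems => |elems| elems[index] = value,
        }
    }
};

test "undef is promoted on first write" {
    var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_state.deinit();

    var val = ArrayValue{ .undef = 4 };
    try val.setElem(arena_state.allocator(), 2, 42);
    try std.testing.expect(val == .elems);
    try std.testing.expectEqual(@as(i64, 42), val.elems[2]);
}
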
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; const array_len_including_sentinel = try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); @@ -28261,8 +28371,7 @@ fn beginComptimePtrMutation( parent.mut_decl, ), .repeated => { - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); @memset(elems, val_ptr.castTag(.repeated).?.data); @@ -28325,8 +28434,7 @@ fn beginComptimePtrMutation( // A struct or union has been initialized to undefined at comptime and now we // are for the first time setting a field. We must change the representation // of the struct/union from `undef` to `struct`/`union`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; switch (parent.ty.zigTypeTag(mod)) { .Struct => { @@ -28436,11 +28544,7 @@ fn beginComptimePtrMutationInner( const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; - const decl = mod.declPtr(mut_decl.decl); - var decl_arena: std.heap.ArenaAllocator = undefined; - const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - decl_val.* = try decl_val.unintern(allocator, mod); + decl_val.* = try decl_val.unintern(sema.arena, mod); if (coerce_ok) { return ComptimePtrMutationKit{ @@ -28928,6 +29032,7 @@ fn coerceEnumToUnion( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); const tag_ty = union_ty.unionTagType(mod) orelse { @@ -28966,7 +29071,9 @@ fn coerceEnumToUnion( errdefer msg.destroy(sema.gpa); const field_name = union_obj.fields.keys()[field_index]; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ + ip.stringToSlice(field_name), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -28976,11 +29083,14 @@ fn coerceEnumToUnion( const msg = msg: { const field_name = union_obj.fields.keys()[field_index]; const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{s}'", .{ - inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), field_ty.fmt(sema.mod), field_name, + inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), + field_ty.fmt(sema.mod), ip.stringToSlice(field_name), }); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ + ip.stringToSlice(field_name), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -29049,7 +29159,10 @@ fn coerceEnumToUnion( const field_name = field.key_ptr.*; const field_ty = field.value_ptr.ty; if (!(try sema.typeHasRuntimeBits(field_ty))) continue; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) }); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ + ip.stringToSlice(field_name), + field_ty.fmt(sema.mod), + }); } try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -29068,11 +29181,11 @@ fn coerceAnonStructToUnion( const mod = sema.mod; const 
inst_ty = sema.typeOf(inst); const field_info: union(enum) { - name: []const u8, + name: InternPool.NullTerminatedString, count: usize, } = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1) - .{ .name = mod.intern_pool.stringToSlice(anon_struct_type.names[0]) } + .{ .name = anon_struct_type.names[0] } else .{ .count = anon_struct_type.names.len }, .struct_type => |struct_type| name: { @@ -29335,6 +29448,7 @@ fn coerceTupleToStruct( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; const struct_ty = try sema.resolveTypeFields(dest_ty); if (struct_ty.isTupleOrAnonStruct(mod)) { @@ -29348,7 +29462,7 @@ fn coerceTupleToStruct( const inst_ty = sema.typeOf(inst); var runtime_src: ?LazySrcLoc = null; - const field_count = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + const field_count = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| struct_obj.fields.count() @@ -29360,11 +29474,11 @@ fn coerceTupleToStruct( const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location // https://github.com/ziglang/zig/issues/15709 - const field_name: []const u8 = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) - mod.intern_pool.stringToSlice(anon_struct_type.names[field_i]) + anon_struct_type.names[field_i] else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}), + try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], else => unreachable, }; @@ -29378,7 +29492,7 @@ fn coerceTupleToStruct( return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(field.default_val, field.ty, sema.mod)) { + if (!init_val.eql(field.default_val.toValue(), field.ty, sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } @@ -29401,9 +29515,9 @@ fn coerceTupleToStruct( const field_name = fields.keys()[i]; const field = fields.values()[i]; const field_src = inst_src; // TODO better source location - if (field.default_val.toIntern() == .unreachable_value) { + if (field.default_val == .none) { const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -29412,9 +29526,9 @@ fn coerceTupleToStruct( continue; } if (runtime_src == null) { - field_vals[i] = field.default_val.toIntern(); + field_vals[i] = field.default_val; } else { - field_ref.* = try sema.addConstant(field.ty, field.default_val); + field_ref.* = try sema.addConstant(field.ty, field.default_val.toValue()); } } @@ -29433,7 +29547,7 @@ fn coerceTupleToStruct( .ty = struct_ty.toIntern(), .storage = .{ .elems = field_vals }, } }); - errdefer mod.intern_pool.remove(struct_val); + errdefer ip.remove(struct_val); return sema.addConstant(struct_ty, struct_val.toValue()); } @@ -29446,7 +29560,8 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const 
dest_field_count = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const ip = &mod.intern_pool; + const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| struct_obj.fields.count() @@ -29459,7 +29574,7 @@ fn coerceTupleToTuple( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const src_field_count = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| struct_obj.fields.count() @@ -29474,30 +29589,26 @@ fn coerceTupleToTuple( const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location // https://github.com/ziglang/zig/issues/15709 - const field_name: []const u8 = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) - mod.intern_pool.stringToSlice(anon_struct_type.names[field_i]) + anon_struct_type.names[field_i] else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}), + try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], else => unreachable, }; - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); - } - const field_ty = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types[field_index_usize].toType(), .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty, else => unreachable, }; - const default_val = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.values[field_index_usize], - .struct_type => |struct_type| switch (mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val.toIntern()) { - .unreachable_value => .none, - else => |default_val| default_val, - }, + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val, else => unreachable, }; @@ -29531,12 +29642,9 @@ fn coerceTupleToTuple( for (field_refs, 0..) 
|*field_ref, i| { if (field_ref.* != .none) continue; - const default_val = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.values[i], - .struct_type => |struct_type| switch (mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val.toIntern()) { - .unreachable_value => .none, - else => |default_val| default_val, - }, + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val, else => unreachable, }; @@ -29552,7 +29660,7 @@ fn coerceTupleToTuple( continue; } const template = "missing struct field: {s}"; - const args = .{tuple_ty.structFieldName(i, mod)}; + const args = .{ip.stringToSlice(tuple_ty.structFieldName(i, mod))}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -29563,7 +29671,7 @@ fn coerceTupleToTuple( if (runtime_src == null) { field_vals[i] = default_val; } else { - const field_ty = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types[i].toType(), .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].ty, else => unreachable, @@ -31803,15 +31911,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { } if (struct_obj.layout == .Auto and mod.backendSupportsFeature(.field_reordering)) { - const optimized_order = if (struct_obj.owner_decl == sema.owner_decl_index) - try sema.perm_arena.alloc(u32, struct_obj.fields.count()) - else blk: { - const decl = mod.declPtr(struct_obj.owner_decl); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); - }; + const optimized_order = try mod.tmp_hack_arena.allocator().alloc(u32, struct_obj.fields.count()); for (struct_obj.fields.values(), 0..) 
|field, i| { optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) @@ -31852,9 +31952,6 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi const decl_index = struct_obj.owner_decl; const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; @@ -31880,7 +31977,6 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -31936,7 +32032,6 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .mod = mod, .gpa = gpa, .arena = undefined, - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -32581,6 +32676,7 @@ fn resolveInferredErrorSetTy( fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; + const ip = &mod.intern_pool; const decl_index = struct_obj.owner_decl; const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; @@ -32628,9 +32724,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -32642,7 +32735,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -32674,7 +32766,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } struct_obj.fields = .{}; - try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); + try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); const Field = struct { type_body_len: u32 = 0, @@ -32725,16 +32817,15 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void extra_index += 1; // This string needs to outlive the ZIR code. 
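Interning is what discharges the lifetime requirement in the comment above: getOrPutString copies the bytes out of the transient ZIR buffer into the pool's string_bytes, so the returned id stays valid after the source goes away, and the dupe into a decl arena deleted below is no longer needed. In miniature:

const std = @import("std");

test "interned bytes outlive their source buffer" {
    const gpa = std.testing.allocator;

    var pool_bytes = std.ArrayListUnmanaged(u8){};
    defer pool_bytes.deinit(gpa);

    // A transient buffer standing in for ZIR string data that will be freed.
    const zir_scratch = try gpa.dupe(u8, "field_name");
    const offset = pool_bytes.items.len;
    try pool_bytes.appendSlice(gpa, zir_scratch); // copy into the pool
    gpa.free(zir_scratch); // the source is gone...

    // ...but the pooled copy, addressed by offset, is still valid.
    try std.testing.expectEqualStrings("field_name", pool_bytes.items[offset..]);
}

The same bytes-list discipline appears further down in generateUnionTagTypeNumbered and generateUnionTagTypeSimple, which build "@typeInfo(<fqn>).Union.tag_type.?" directly in ip.string_bytes via ensureUnusedCapacity plus appendSliceAssumeCapacity, then claim the appended tail with getOrPutTrailingString, skipping the old allocPrintZ round trip.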
- const field_name = if (field_name_zir) |some| - try decl_arena_allocator.dupe(u8, some) - else - try std.fmt.allocPrint(decl_arena_allocator, "{d}", .{field_i}); + const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s| s else try std.fmt.allocPrint(sema.arena, "{d}", .{ + field_i, + })); const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name}); + const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{ip.stringToSlice(field_name)}); errdefer msg.destroy(gpa); const prev_field_index = struct_obj.fields.getIndex(field_name).?; @@ -32748,7 +32839,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void gop.value_ptr.* = .{ .ty = Type.noreturn, .abi_align = 0, - .default_val = Value.@"unreachable", + .default_val = .none, .is_comptime = is_comptime, .offset = undefined, }; @@ -32917,7 +33008,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }).lazy; return sema.failWithNeededComptime(&block_scope, init_src, "struct field default value must be comptime-known"); }; - field.default_val = try default_val.copy(decl_arena_allocator); + field.default_val = try default_val.intern(field.ty, mod); } } } @@ -32935,6 +33026,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { defer tracy.end(); const gpa = mod.gpa; + const ip = &mod.intern_pool; const decl_index = union_obj.owner_decl; const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; @@ -32978,9 +33070,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { extra_index += body.len; const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -32992,7 +33081,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -33033,7 +33121,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { try ct_decl.intern(mod); } - try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); + try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); var int_tag_ty: Type = undefined; var enum_field_names: []InternPool.NullTerminatedString = &.{}; @@ -33070,7 +33158,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } else { // The provided type is the enum tag type. 
union_obj.tag_ty = provided_ty; - const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.toIntern())) { + const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) { .enum_type => |x| x, else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), }; @@ -33174,10 +33262,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } // This string needs to outlive the ZIR code. - const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + const field_name = try ip.getOrPutString(gpa, field_name_zir); if (enum_field_names.len != 0) { - enum_field_names[field_i] = field_name_ip; + enum_field_names[field_i] = field_name; } const field_ty: Type = if (!has_type) @@ -33205,7 +33292,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { if (gop.found_existing) { const msg = msg: { const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name}); + const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{ + ip.stringToSlice(field_name), + }); errdefer msg.destroy(gpa); const prev_field_index = union_obj.fields.getIndex(field_name).?; @@ -33218,14 +33307,14 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (explicit_enum_info) |tag_info| { - const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse { + const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ - field_name, union_obj.tag_ty.fmt(mod), + ip.stringToSlice(field_name), union_obj.tag_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -33317,7 +33406,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { for (tag_info.names, 0..) 
|field_name, field_index| { if (explicit_tags_seen[field_index]) continue; try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ - mod.intern_pool.stringToSlice(field_name), + ip.stringToSlice(field_name), }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -33345,14 +33434,22 @@ fn generateUnionTagTypeNumbered( union_obj: *Module.Union, ) !Type { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); const name = name: { - const fqn = try union_obj.getFullyQualifiedName(mod); - defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + const prefix = "@typeInfo("; + const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); + const suffix = ").Union.tag_type.?"; + const start = ip.string_bytes.items.len; + try ip.string_bytes.ensureUnusedCapacity(gpa, prefix.len + suffix.len + fqn.len); + ip.string_bytes.appendSliceAssumeCapacity(prefix); + ip.string_bytes.appendSliceAssumeCapacity(fqn); + ip.string_bytes.appendSliceAssumeCapacity(suffix); + break :name try ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33390,6 +33487,8 @@ fn generateUnionTagTypeSimple( maybe_union_obj: ?*Module.Union, ) !Type { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const new_decl_index = new_decl_index: { const union_obj = maybe_union_obj orelse { @@ -33402,9 +33501,15 @@ fn generateUnionTagTypeSimple( const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); const name = name: { - const fqn = try union_obj.getFullyQualifiedName(mod); - defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + const prefix = "@typeInfo("; + const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); + const suffix = ").Union.tag_type.?"; + const start = ip.string_bytes.items.len; + try ip.string_bytes.ensureUnusedCapacity(gpa, prefix.len + suffix.len + fqn.len); + ip.string_bytes.appendSliceAssumeCapacity(prefix); + ip.string_bytes.appendSliceAssumeCapacity(fqn); + ip.string_bytes.appendSliceAssumeCapacity(suffix); + break :name try ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33436,7 +33541,9 @@ fn generateUnionTagTypeSimple( } fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - var wip_captures = try WipCaptureScope.init(sema.gpa, sema.owner_decl.src_scope); + const gpa = sema.gpa; + + var wip_captures = try WipCaptureScope.init(gpa, sema.owner_decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ @@ -33450,19 +33557,20 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { .is_comptime = true, }; defer { - block.instructions.deinit(sema.gpa); - block.params.deinit(sema.gpa); + block.instructions.deinit(gpa); + block.params.deinit(gpa); } const src = LazySrcLoc.nodeOffset(0); const mod = sema.mod; + const ip = &mod.intern_pool; const std_pkg = mod.main_pkg.table.get("std").?; const std_file = 
(mod.importPkg(std_pkg) catch unreachable).file; const opt_builtin_inst = (try sema.namespaceLookupRef( &block, src, mod.declPtr(std_file.root_decl.unwrap().?).src_namespace, - "builtin", + try ip.getOrPutString(gpa, "builtin"), )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); const builtin_inst = try sema.analyzeLoad(&block, src, opt_builtin_inst, src); const builtin_ty = sema.analyzeAsType(&block, src, builtin_inst) catch |err| switch (err) { @@ -33473,7 +33581,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { &block, src, builtin_ty.getNamespaceIndex(mod).unwrap().?, - name, + try ip.getOrPutString(gpa, name), )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); return sema.analyzeDeclVal(&block, src, opt_ty_decl); } @@ -33608,7 +33716,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count()); for (field_vals, s.fields.values(), 0..) |*field_val, field, i| { if (field.is_comptime) { - field_val.* = try field.default_val.intern(field.ty, mod); + field_val.* = field.default_val; continue; } if (field.ty.eql(resolved_ty, sema.mod)) { @@ -34287,7 +34395,7 @@ fn unionFieldIndex( sema: *Sema, block: *Block, unresolved_union_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; @@ -34302,7 +34410,7 @@ fn structFieldIndex( sema: *Sema, block: *Block, unresolved_struct_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; @@ -34321,19 +34429,17 @@ fn anonStructFieldIndex( sema: *Sema, block: *Block, struct_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| { - if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { - return @intCast(u32, i); - } + if (name == field_name) return @intCast(u32, i); }, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { for (struct_obj.fields.keys(), 0..) 
|name, i| {
- if (mem.eql(u8, name, field_name)) {
+ if (name == field_name) {
 return @intCast(u32, i);
 }
 }
@@ -34341,7 +34447,7 @@ fn anonStructFieldIndex(
 else => unreachable,
 }
 return sema.fail(block, field_src, "no field named '{s}' in anonymous struct '{}'", .{
- field_name, struct_ty.fmt(sema.mod),
+ mod.intern_pool.stringToSlice(field_name), struct_ty.fmt(sema.mod),
 });
 }
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 1ff3ce9415f5..0d771aa18428 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -201,10 +201,10 @@ pub fn print(
 },
 .variable => return writer.writeAll("(variable)"),
 .extern_func => |extern_func| return writer.print("(extern function '{s}')", .{
- mod.declPtr(extern_func.decl).name,
+ mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name),
 }),
- .func => |func| return writer.print("(function '{s}')", .{
- mod.declPtr(mod.funcPtr(func.index).owner_decl).name,
+ .func => |func| return writer.print("(function '{s}')", .{
+ mod.intern_pool.stringToSlice(mod.declPtr(mod.funcPtr(func.index).owner_decl).name),
 }),
 .int => |int| switch (int.storage) {
 inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}),
@@ -296,19 +296,20 @@ fn printAggregate(
 }
 if (ty.zigTypeTag(mod) == .Struct) {
 try writer.writeAll(".{");
- const max_len = std.math.min(ty.structFieldCount(mod), max_aggregate_items);
+ const max_len = @min(ty.structFieldCount(mod), max_aggregate_items);

- var i: u32 = 0;
- while (i < max_len) : (i += 1) {
+ for (0..max_len) |i| {
 if (i != 0) try writer.writeAll(", ");
- if (switch (mod.intern_pool.indexToKey(ty.toIntern())) {
- .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[i],
- .anon_struct_type => |anon_struct_type| if (anon_struct_type.isTuple())
- null
- else
- mod.intern_pool.stringToSlice(anon_struct_type.names[i]),
+
+ const field_name = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+ .struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(),
+ .anon_struct_type => |x| if (x.isTuple()) .none else x.names[i].toOptional(),
 else => unreachable,
- }) |field_name| try writer.print(".{s} = ", .{field_name});
+ };
+
+ if (field_name.unwrap()) |name_ip| try writer.print(".{s} = ", .{
+ mod.intern_pool.stringToSlice(name_ip),
+ });
 try print(.{
 .ty = ty.structFieldType(i, mod),
 .val = try val.fieldValue(mod, i),
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index d01a93dd0d31..bf945e6983de 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -4350,7 +4350,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 .data = .{ .reg = .x30 },
 });
 } else if (func_value.getExternFunc(mod)) |extern_func| {
- const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0);
+ const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name);
 const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
 if (self.bin_file.cast(link.File.MachO)) |macho_file| {
 const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name);
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index fde5424ddce0..b66012660475 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -276,8 +276,6 @@ pub fn generate(
 assert(fn_owner_decl.has_tv);
 const fn_type = fn_owner_decl.ty;
- log.debug("fn {s}", .{fn_owner_decl.name});
-
 var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
 defer {
 assert(branch_stack.items.len
== 1); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 7b1258155c61..9b7ba19c13a1 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2208,7 +2208,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const atom = func.bin_file.getAtomPtr(atom_index); const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( - mem.sliceTo(ext_decl.name, 0), + mod.intern_pool.stringToSlice(ext_decl.name), atom.getSymbolIndex().?, mod.intern_pool.stringToSliceUnwrap(ext_decl.getOwnedExternFunc(mod).?.lib_name), type_index, @@ -3180,9 +3180,8 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { } }, .err => |err| { - const name = mod.intern_pool.stringToSlice(err.name); - const kv = try mod.getErrorValue(name); - return WValue{ .imm32 = kv.value }; + const int = try mod.getErrorValue(err.name); + return WValue{ .imm32 = int }; }, .error_union => |error_union| { const err_tv: TypedValue = switch (error_union.val) { @@ -3320,18 +3319,15 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), .int => |int| intStorageAsI32(int.storage, mod), .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), - .err => |err| @bitCast(i32, mod.global_error_set.get(mod.intern_pool.stringToSlice(err.name)).?), + .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)), else => unreachable, }, } - switch (ty.zigTypeTag(mod)) { - .ErrorSet => { - const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError(mod).?) catch unreachable; // passed invalid `Value` to function - return @bitCast(i32, kv.value); - }, + return switch (ty.zigTypeTag(mod)) { + .ErrorSet => @bitCast(i32, val.getErrorInt(mod)), else => unreachable, // Programmer called this function for an illegal type - } + }; } fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { @@ -6874,8 +6870,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod); - defer mod.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod)); const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); // check if we already generated code for this. 
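[Editor's note, not part of the patch.] The error-value changes in these hunks
(`try mod.getErrorValue(err.name)`, `mod.global_error_set.getIndex(name)`)
lean on the global error set now being an array hash map keyed by interned
names, where an entry's insertion index is the error's integer value. A
minimal sketch of that invariant, with `Name` standing in for
`InternPool.NullTerminatedString` and `ErrorInt` mirroring `Module.ErrorInt`:

    const std = @import("std");

    // Stand-in handle type; the real one is InternPool.NullTerminatedString.
    const Name = enum(u32) { _ };
    const ErrorInt = u32;

    // Sketch of the new Module.getErrorValue: get-or-put the interned name
    // and return its position in the array hash map, which *is* the error's
    // integer value.
    fn getErrorValue(
        gpa: std.mem.Allocator,
        set: *std.AutoArrayHashMapUnmanaged(Name, void),
        name: Name,
    ) !ErrorInt {
        const gop = try set.getOrPut(gpa, name);
        return @intCast(ErrorInt, gop.index);
    }

    test "insertion index doubles as the error integer" {
        const gpa = std.testing.allocator;
        var set: std.AutoArrayHashMapUnmanaged(Name, void) = .{};
        defer set.deinit(gpa);
        const a = try getErrorValue(gpa, &set, @intToEnum(Name, 1));
        const b = try getErrorValue(gpa, &set, @intToEnum(Name, 2));
        try std.testing.expect(b == a + 1);
        // Re-inserting an existing name returns its original integer.
        try std.testing.expectEqual(a, try getErrorValue(gpa, &set, @intToEnum(Name, 1)));
    }

This is why codegen paths that previously stored a separate `kv.value` can now
recover the integer via `getIndex` on the same map, as the hunks below do.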
@@ -7037,9 +7032,8 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lowest: ?u32 = null; var highest: ?u32 = null; - for (names) |name_ip| { - const name = mod.intern_pool.stringToSlice(name_ip); - const err_int = mod.global_error_set.get(name).?; + for (names) |name| { + const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b9cc3f705218..2675d5350a58 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8132,7 +8132,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier })); } else unreachable; } else if (func_value.getExternFunc(mod)) |extern_func| { - const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name); const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try self.owner.getSymbolIndex(self); diff --git a/src/codegen.zig b/src/codegen.zig index b0febb5ea72a..77359d78da0a 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -142,11 +142,12 @@ pub fn generateLazySymbol( if (lazy_sym.ty.isAnyError(mod)) { alignment.* = 4; - const err_names = mod.error_name_list.items; + const err_names = mod.global_error_set.keys(); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); var offset = code.items.len; try code.resize((1 + err_names.len + 1) * 4); - for (err_names) |err_name| { + for (err_names) |err_name_nts| { + const err_name = mod.intern_pool.stringToSlice(err_name_nts); mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); offset += 4; try code.ensureUnusedCapacity(err_name.len + 1); @@ -251,15 +252,13 @@ pub fn generateSymbol( val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); }, .err => |err| { - const name = mod.intern_pool.stringToSlice(err.name); - const kv = try mod.getErrorValue(name); - try code.writer().writeInt(u16, @intCast(u16, kv.value), endian); + const int = try mod.getErrorValue(err.name); + try code.writer().writeInt(u16, @intCast(u16, int), endian); }, .error_union => |error_union| { const payload_ty = typed_value.ty.errorUnionPayload(mod); - const err_val = switch (error_union.val) { - .err_name => |err_name| @intCast(u16, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value), + .err_name => |err_name| @intCast(u16, try mod.getErrorValue(err_name)), .payload => @as(u16, 0), }; @@ -974,11 +973,8 @@ pub fn genTypedValue( }, owner_decl_index); }, .ErrorSet => { - const err_name = mod.intern_pool.stringToSlice( - mod.intern_pool.indexToKey(typed_value.val.toIntern()).err.name, - ); - const global_error_set = mod.global_error_set; - const error_index = global_error_set.get(err_name).?; + const err_name = mod.intern_pool.indexToKey(typed_value.val.toIntern()).err.name; + const error_index = mod.global_error_set.getIndex(err_name).?; return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4b325122cad6..7b091d682362 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -452,6 +452,7 @@ pub const Function = struct { var promoted = f.object.dg.ctypes.promote(gpa); defer f.object.dg.ctypes.demote(promoted); const arena = promoted.arena.allocator(); + const mod = 
f.object.dg.module; gop.value_ptr.* = .{ .fn_name = switch (key) { @@ -460,7 +461,7 @@ pub const Function = struct { .never_inline, => |owner_decl| try std.fmt.allocPrint(arena, "zig_{s}_{}__{d}", .{ @tagName(key), - fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)), + fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)), @enumToInt(owner_decl), }), }, @@ -1465,7 +1466,7 @@ pub const DeclGen = struct { try writer.writeAll(" .payload = {"); } if (field_ty.hasRuntimeBits(mod)) { - try writer.print(" .{ } = ", .{fmtIdent(field_name)}); + try writer.print(" .{ } = ", .{fmtIdent(mod.intern_pool.stringToSlice(field_name))}); try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type); try writer.writeByte(' '); } else for (ty.unionFields(mod).values()) |field| { @@ -1849,9 +1850,9 @@ pub const DeclGen = struct { try mod.markDeclAlive(decl); if (mod.decl_exports.get(decl_index)) |exports| { - try writer.writeAll(exports.items[export_index].options.name); + try writer.writeAll(mod.intern_pool.stringToSlice(exports.items[export_index].name)); } else if (decl.isExtern(mod)) { - try writer.writeAll(mem.span(decl.name)); + try writer.writeAll(mod.intern_pool.stringToSlice(decl.name)); } else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), // expand to 3x the length of its input, but let's cut it off at a much shorter limit. @@ -1987,7 +1988,7 @@ fn renderTypeName( try w.print("{s} {s}{}__{d}", .{ @tagName(tag)["fwd_".len..], attributes, - fmtIdent(mem.span(mod.declPtr(owner_decl).name)), + fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)), @enumToInt(owner_decl), }); }, @@ -2406,11 +2407,12 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll("enum {\n"); o.indent_writer.pushIndent(); var max_name_len: usize = 0; - for (mod.error_name_list.items[1..], 1..) |name, value| { - max_name_len = std.math.max(name.len, max_name_len); + for (mod.global_error_set.keys()[1..], 1..) |name_nts, value| { + const name = mod.intern_pool.stringToSlice(name_nts); + max_name_len = @max(name.len, max_name_len); const err_val = try mod.intern(.{ .err = .{ .ty = .anyerror_type, - .name = mod.intern_pool.getString(name).unwrap().?, + .name = name_nts, } }); try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other); try writer.print(" = {d}u,\n", .{value}); @@ -2424,7 +2426,8 @@ pub fn genErrDecls(o: *Object) !void { defer o.dg.gpa.free(name_buf); @memcpy(name_buf[0..name_prefix.len], name_prefix); - for (mod.error_name_list.items) |name| { + for (mod.global_error_set.keys()) |name_nts| { + const name = mod.intern_pool.stringToSlice(name_nts); @memcpy(name_buf[name_prefix.len..][0..name.len], name); const identifier = name_buf[0 .. name_prefix.len + name.len]; @@ -2446,14 +2449,15 @@ pub fn genErrDecls(o: *Object) !void { } const name_array_ty = try mod.arrayType(.{ - .len = mod.error_name_list.items.len, + .len = mod.global_error_set.count(), .child = .slice_const_u8_sentinel_0_type, }); try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete); try writer.writeAll(" = {"); - for (mod.error_name_list.items, 0..) |name, value| { + for (mod.global_error_set.keys(), 0..) 
|name_nts, value| { + const name = mod.intern_pool.stringToSlice(name_nts); if (value != 0) try writer.writeByte(','); const len_val = try mod.intValue(Type.usize, name.len); @@ -2469,14 +2473,16 @@ fn genExports(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); + const mod = o.dg.module; + const ip = &mod.intern_pool; const fwd_decl_writer = o.dg.fwd_decl.writer(); - if (o.dg.module.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { + if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { for (exports.items[1..], 1..) |@"export", i| { try fwd_decl_writer.writeAll("zig_export("); try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) }); try fwd_decl_writer.print(", {s}, {s});\n", .{ - fmtStringLiteral(exports.items[0].options.name, null), - fmtStringLiteral(@"export".options.name, null), + fmtStringLiteral(ip.stringToSlice(exports.items[0].name), null), + fmtStringLiteral(ip.stringToSlice(@"export".name), null), }); } } @@ -2680,9 +2686,10 @@ pub fn genDecl(o: *Object) !void { if (!is_global) try w.writeAll("static "); if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage "); - if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| + try w.print("zig_linksection(\"{s}\", ", .{s}); try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete); - if (decl.@"linksection" != null) try w.writeAll(", read, write)"); + if (decl.@"linksection" != .none) try w.writeAll(", read, write)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer); try w.writeByte(';'); @@ -2697,9 +2704,10 @@ pub fn genDecl(o: *Object) !void { const w = o.writer(); if (!is_global) try w.writeAll("static "); - if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| + try w.print("zig_linksection(\"{s}\", ", .{s}); try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.@"align", .complete); - if (decl.@"linksection" != null) try w.writeAll(", read)"); + if (decl.@"linksection" != .none) try w.writeAll(", read)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer); try w.writeAll(";\n"); @@ -4229,7 +4237,9 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const writer = f.object.writer(); const function = mod.funcPtr(ty_fn.func); - try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); + try writer.print("/* dbg func:{s} */\n", .{ + mod.intern_pool.stringToSlice(mod.declPtr(function.owner_decl).name), + }); return .none; } @@ -5176,6 +5186,7 @@ fn fieldLocation( byte_offset: u32, end: void, } { + const ip = &mod.intern_pool; return switch (container_ty.zigTypeTag(mod)) { .Struct => switch (container_ty.containerLayout(mod)) { .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| { @@ -5186,7 +5197,7 @@ fn fieldLocation( break .{ .field = if (container_ty.isSimpleTuple(mod)) .{ .field = next_field_index } else - .{ .identifier = container_ty.structFieldName(next_field_index, mod) } }; + .{ .identifier = ip.stringToSlice(container_ty.structFieldName(next_field_index, mod)) } }; } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) 
.end else .begin, .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0) .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } @@ -5204,9 +5215,9 @@ fn fieldLocation( .begin; const field_name = container_ty.unionFields(mod).keys()[field_index]; return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_| - .{ .payload_identifier = field_name } + .{ .payload_identifier = ip.stringToSlice(field_name) } else - .{ .identifier = field_name } }; + .{ .identifier = ip.stringToSlice(field_name) } }; }, .Packed => .begin, }, @@ -5347,6 +5358,7 @@ fn fieldPtr( fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; @@ -5369,7 +5381,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { .Auto, .Extern => if (struct_ty.isSimpleTuple(mod)) .{ .field = extra.field_index } else - .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) }, .Packed => { const struct_obj = mod.typeToStruct(struct_ty).?; const int_info = struct_ty.intInfo(mod); @@ -5431,7 +5443,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0) .{ .field = extra.field_index } else - .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) }, .union_type => |union_type| field_name: { const union_obj = mod.unionPtr(union_type.index); @@ -5462,9 +5474,9 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { } else { const name = union_obj.fields.keys()[extra.field_index]; break :field_name if (union_type.hasTag()) .{ - .payload_identifier = name, + .payload_identifier = ip.stringToSlice(name), } else .{ - .identifier = name, + .identifier = ip.stringToSlice(name), }; } }, @@ -6723,6 +6735,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const inst_ty = f.typeOfIndex(inst); const len = @intCast(usize, inst_ty.arrayLen(mod)); @@ -6773,7 +6786,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod)) .{ .field = field_i } else - .{ .identifier = inst_ty.structFieldName(field_i, mod) }); + .{ .identifier = ip.stringToSlice(inst_ty.structFieldName(field_i, mod)) }); try a.assign(f, writer); try f.writeCValue(writer, element, .Other); try a.end(f, writer); @@ -6851,6 +6864,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; @@ -6886,8 +6900,8 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.print("{}", .{try f.fmtIntLiteral(tag_ty, int_val)}); try a.end(f, writer); } - break :field .{ .payload_identifier = field_name }; - } else .{ .identifier = field_name }; + break :field .{ .payload_identifier = ip.stringToSlice(field_name) 
}; + } else .{ .identifier = ip.stringToSlice(field_name) }; const a = try Assignment.start(f, writer, payload_ty); try f.writeCValueMember(writer, local, field); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index dc1749d42e0e..81ca1dd80d5b 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1953,11 +1953,11 @@ pub const CType = extern union { .name = try if (ty.isSimpleTuple(mod)) std.fmt.allocPrintZ(arena, "f{}", .{field_i}) else - arena.dupeZ(u8, switch (zig_ty_tag) { + arena.dupeZ(u8, mod.intern_pool.stringToSlice(switch (zig_ty_tag) { .Struct => ty.structFieldName(field_i, mod), .Union => ty.unionFields(mod).keys()[field_i], else => unreachable, - }), + })), .type = store.set.typeToIndex(field_ty, mod, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter, .payload => .complete, @@ -2102,12 +2102,13 @@ pub const CType = extern union { }) or !mem.eql( u8, if (ty.isSimpleTuple(mod)) - std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i, mod), - .Union => ty.unionFields(mod).keys()[field_i], - else => unreachable, - }, + std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable + else + mod.intern_pool.stringToSlice(switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i, mod), + .Union => ty.unionFields(mod).keys()[field_i], + else => unreachable, + }), mem.span(c_field.name), ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" != c_field.alignas.@"align") return false; @@ -2225,11 +2226,12 @@ pub const CType = extern union { }); hasher.update(if (ty.isSimpleTuple(mod)) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i, mod), - .Union => ty.unionFields(mod).keys()[field_i], - else => unreachable, - }); + else + mod.intern_pool.stringToSlice(switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i, mod), + .Union => ty.unionFields(mod).keys()[field_i], + else => unreachable, + })); autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align"); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index be6ca714a613..bd5052809512 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -585,13 +585,13 @@ pub const Object = struct { const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); - const error_name_list = mod.error_name_list.items; + const error_name_list = mod.global_error_set.keys(); const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len); defer mod.gpa.free(llvm_errors); llvm_errors[0] = llvm_slice_ty.getUndef(); - for (llvm_errors[1..], 0..) |*llvm_error, i| { - const name = error_name_list[1..][i]; + for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| { + const name = mod.intern_pool.stringToSlice(name_nts); const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False); const str_global = self.llvm_module.addGlobal(str_init.typeOf(), ""); str_global.setInitializer(str_init); @@ -671,7 +671,7 @@ pub const Object = struct { const llvm_global = entry.value_ptr.*; // Same logic as below but for externs instead of exports. 
const decl = mod.declPtr(decl_index); - const other_global = object.getLlvmGlobal(decl.name) orelse continue; + const other_global = object.getLlvmGlobal(mod.intern_pool.stringToSlice(decl.name)) orelse continue; if (other_global == llvm_global) continue; llvm_global.replaceAllUsesWith(other_global); @@ -689,8 +689,7 @@ pub const Object = struct { // case, we need to replace all uses of it with this exported global. // TODO update std.builtin.ExportOptions to have the name be a // null-terminated slice. - const exp_name_z = try mod.gpa.dupeZ(u8, exp.options.name); - defer mod.gpa.free(exp_name_z); + const exp_name_z = mod.intern_pool.stringToSlice(exp.name); const other_global = object.getLlvmGlobal(exp_name_z.ptr) orelse continue; if (other_global == llvm_global) continue; @@ -923,9 +922,8 @@ pub const Object = struct { dg.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); } - if (decl.@"linksection") |section| { + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| llvm_func.setSection(section); - } // Remove all the basic blocks of a function in order to start over, generating // LLVM IR from an empty function body. @@ -1173,7 +1171,7 @@ pub const Object = struct { 0; const subprogram = dib.createFunction( di_file.?.toScope(), - decl.name, + mod.intern_pool.stringToSlice(decl.name), llvm_func.getValueName(), di_file.?, line_number, @@ -1273,22 +1271,26 @@ pub const Object = struct { if (decl.isExtern(mod)) { var free_decl_name = false; const decl_name = decl_name: { + const decl_name = mod.intern_pool.stringToSlice(decl.name); + if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { free_decl_name = true; - break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ decl.name, lib_name }); + break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ + decl_name, lib_name, + }); } } } - break :decl_name std.mem.span(decl.name); + + break :decl_name decl_name; }; defer if (free_decl_name) gpa.free(decl_name); llvm_global.setValueName(decl_name); if (self.getLlvmGlobal(decl_name)) |other_global| { if (other_global != llvm_global) { - log.debug("updateDeclExports isExtern()=true setValueName({s}) conflict", .{decl.name}); try self.extern_collisions.put(gpa, decl_index, {}); } } @@ -1298,11 +1300,11 @@ pub const Object = struct { if (self.di_map.get(decl)) |di_node| { if (try decl.isFunction(mod)) { const di_func = @ptrCast(*llvm.DISubprogram, di_node); - const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name)); + const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); di_func.replaceLinkageName(linkage_name); } else { const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node); - const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name)); + const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); di_global.replaceLinkageName(linkage_name); } } @@ -1317,7 +1319,7 @@ pub const Object = struct { } } } else if (exports.len != 0) { - const exp_name = exports[0].options.name; + const exp_name = mod.intern_pool.stringToSlice(exports[0].name); llvm_global.setValueName2(exp_name.ptr, exp_name.len); llvm_global.setUnnamedAddr(.False); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); @@ -1332,21 +1334,19 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - switch 
(exports[0].options.linkage) { + switch (exports[0].linkage) { .Internal => unreachable, .Strong => llvm_global.setLinkage(.External), .Weak => llvm_global.setLinkage(.WeakODR), .LinkOnce => llvm_global.setLinkage(.LinkOnceODR), } - switch (exports[0].options.visibility) { + switch (exports[0].visibility) { .default => llvm_global.setVisibility(.Default), .hidden => llvm_global.setVisibility(.Hidden), .protected => llvm_global.setVisibility(.Protected), } - if (exports[0].options.section) |section| { - const section_z = try gpa.dupeZ(u8, section); - defer gpa.free(section_z); - llvm_global.setSection(section_z); + if (mod.intern_pool.stringToSliceUnwrap(exports[0].section)) |section| { + llvm_global.setSection(section); } if (decl.val.getVariable(mod)) |variable| { if (variable.is_threadlocal) { @@ -1356,13 +1356,12 @@ pub const Object = struct { // If a Decl is exported more than one time (which is rare), // we add aliases for all but the first export. - // TODO LLVM C API does not support deleting aliases. We need to - // patch it to support this or figure out how to wrap the C++ API ourselves. + // TODO LLVM C API does not support deleting aliases. + // The planned solution to this is https://github.com/ziglang/zig/issues/13265 // Until then we iterate over existing aliases and make them point // to the correct decl, or otherwise add a new alias. Old aliases are leaked. for (exports[1..]) |exp| { - const exp_name_z = try gpa.dupeZ(u8, exp.options.name); - defer gpa.free(exp_name_z); + const exp_name_z = mod.intern_pool.stringToSlice(exp.name); if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| { alias.setAliasee(llvm_global); @@ -1376,8 +1375,7 @@ pub const Object = struct { } } } else { - const fqn = try decl.getFullyQualifiedName(mod); - defer gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); llvm_global.setValueName2(fqn.ptr, fqn.len); llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); @@ -2092,8 +2090,7 @@ pub const Object = struct { const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = field_offset + field_size; - const field_name = try gpa.dupeZ(u8, fields.keys()[field_and_index.index]); - defer gpa.free(field_name); + const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]); try di_fields.append(gpa, dib.createMemberType( fwd_decl.toScope(), @@ -2200,12 +2197,9 @@ pub const Object = struct { const field_size = field.ty.abiSize(mod); const field_align = field.normalAlignment(mod); - const field_name_copy = try gpa.dupeZ(u8, field_name); - defer gpa.free(field_name_copy); - di_fields.appendAssumeCapacity(dib.createMemberType( fwd_decl.toScope(), - field_name_copy, + mod.intern_pool.stringToSlice(field_name), null, // file 0, // line field_size * 8, // size in bits @@ -2327,7 +2321,7 @@ pub const Object = struct { if (fn_info.return_type.toType().isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { - const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType()); + const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } @@ -2384,7 +2378,7 @@ pub const Object = struct { const fields: [0]*llvm.DIType = .{}; return o.di_builder.?.createStructType( try o.namespaceToDebugScope(decl.src_namespace), - decl.name, // TODO use fully qualified name + mod.intern_pool.stringToSlice(decl.name), // TODO use fully 
qualified name
 try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope),
 decl.src_line + 1,
 0, // size in bits
@@ -2399,18 +2393,18 @@
 );
 }
- fn getStackTraceType(o: *Object) Type {
+ fn getStackTraceType(o: *Object) Allocator.Error!Type {
 const mod = o.module;
 const std_pkg = mod.main_pkg.table.get("std").?;
 const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
- const builtin_str: []const u8 = "builtin";
+ const builtin_str = try mod.intern_pool.getOrPutString(mod.gpa, "builtin");
 const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace);
 const builtin_decl = std_namespace.decls
 .getKeyAdapted(builtin_str, Module.DeclAdapter{ .mod = mod }).?;
- const stack_trace_str: []const u8 = "StackTrace";
+ const stack_trace_str = try mod.intern_pool.getOrPutString(mod.gpa, "StackTrace");
 // buffer is only used for int_type, `builtin` is a struct.
 const builtin_ty = mod.declPtr(builtin_decl).val.toType();
 const builtin_namespace = builtin_ty.getNamespace(mod).?;
@@ -2452,16 +2446,13 @@ pub const DeclGen = struct {
 const decl_index = dg.decl_index;
 assert(decl.has_tv);
- log.debug("gen: {s} type: {}, value: {}", .{
- decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(),
- });
 if (decl.val.getExternFunc(mod)) |extern_func| {
 _ = try dg.resolveLlvmFunction(extern_func.decl);
 } else {
 const target = mod.getTarget();
 var global = try dg.resolveGlobalDecl(decl_index);
 global.setAlignment(decl.getAlignment(mod));
- if (decl.@"linksection") |section| global.setSection(section);
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| global.setSection(s);
 assert(decl.has_tv);
 const init_val = if (decl.val.getVariable(mod)) |variable| init_val: {
 break :init_val variable.init;
@@ -2495,7 +2486,8 @@ pub const DeclGen = struct {
 new_global.setLinkage(global.getLinkage());
 new_global.setUnnamedAddr(global.getUnnamedAddress());
 new_global.setAlignment(global.getAlignment());
- if (decl.@"linksection") |section| new_global.setSection(section);
+ if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
+ new_global.setSection(s);
 new_global.setInitializer(llvm_init);
 // TODO: How should this work when the address space of a global changes?
global.replaceAllUsesWith(new_global); @@ -2513,7 +2505,7 @@ pub const DeclGen = struct { const is_internal_linkage = !dg.module.decl_exports.contains(decl_index); const di_global = dib.createGlobalVariableExpression( di_file.toScope(), - decl.name, + mod.intern_pool.stringToSlice(decl.name), global.getValueName(), di_file, line_number, @@ -2544,8 +2536,7 @@ pub const DeclGen = struct { const fn_type = try dg.lowerType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(mod); - defer dg.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); @@ -2557,7 +2548,7 @@ pub const DeclGen = struct { llvm_fn.setUnnamedAddr(.True); } else { if (target.isWasm()) { - dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); + dg.addFnAttrString(llvm_fn, "wasm-import-name", mod.intern_pool.stringToSlice(decl.name)); if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); @@ -2699,8 +2690,7 @@ pub const DeclGen = struct { const mod = dg.module; const decl = mod.declPtr(decl_index); - const fqn = try decl.getFullyQualifiedName(mod); - defer dg.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const target = mod.getTarget(); @@ -2716,7 +2706,7 @@ pub const DeclGen = struct { // This is needed for declarations created by `@extern`. if (decl.isExtern(mod)) { - llvm_global.setValueName(decl.name); + llvm_global.setValueName(mod.intern_pool.stringToSlice(decl.name)); llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); if (decl.val.getVariable(mod)) |variable| { @@ -2811,8 +2801,7 @@ pub const DeclGen = struct { if (gop.found_existing) return gop.value_ptr.*; const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type; - const name = try mod.opaqueFullyQualifiedName(opaque_type); - defer gpa.free(name); + const name = mod.intern_pool.stringToSlice(try mod.opaqueFullyQualifiedName(opaque_type)); const llvm_struct_ty = dg.context.structCreateNamed(name); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls @@ -2963,8 +2952,7 @@ pub const DeclGen = struct { return int_llvm_ty; } - const name = try struct_obj.getFullyQualifiedName(mod); - defer gpa.free(name); + const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); const llvm_struct_ty = dg.context.structCreateNamed(name); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls @@ -3040,8 +3028,7 @@ pub const DeclGen = struct { return enum_tag_llvm_ty; } - const name = try union_obj.getFullyQualifiedName(mod); - defer gpa.free(name); + const name = mod.intern_pool.stringToSlice(try union_obj.getFullyQualifiedName(mod)); const llvm_union_ty = dg.context.structCreateNamed(name); gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls @@ -3119,7 +3106,7 @@ pub const DeclGen = struct { if (fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing) { - const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType()); + const ptr_ty = try mod.singleMutPtrType(try dg.object.getStackTraceType()); try llvm_params.append(try dg.lowerType(ptr_ty)); } @@ -3266,9 +3253,8 @@ pub const DeclGen = struct { 
}, .err => |err| { const llvm_ty = try dg.lowerType(Type.anyerror); - const name = mod.intern_pool.stringToSlice(err.name); - const kv = try mod.getErrorValue(name); - return llvm_ty.constInt(kv.value, .False); + const int = try mod.getErrorValue(err.name); + return llvm_ty.constInt(int, .False); }, .error_union => |error_union| { const err_tv: TypedValue = switch (error_union.val) { @@ -5960,8 +5946,7 @@ pub const FuncGen = struct { .base_line = self.base_line, }); - const fqn = try decl.getFullyQualifiedName(mod); - defer self.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const is_internal_linkage = !mod.decl_exports.contains(decl_index); const fn_ty = try mod.funcType(.{ @@ -5981,7 +5966,7 @@ pub const FuncGen = struct { }); const subprogram = dib.createFunction( di_file.toScope(), - decl.name, + mod.intern_pool.stringToSlice(decl.name), fqn, di_file, line_number, @@ -8629,9 +8614,8 @@ pub const FuncGen = struct { const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); - for (names) |name_ip| { - const name = mod.intern_pool.stringToSlice(name_ip); - const err_int = mod.global_error_set.get(name).?; + for (names) |name| { + const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); const this_tag_int_value = try self.dg.lowerValue(.{ .ty = Type.err_int, .val = try mod.intValue(Type.err_int, err_int), @@ -8681,8 +8665,7 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - defer self.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod)); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn}); const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())}; @@ -8754,8 +8737,7 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - defer self.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod)); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); const slice_ty = Type.slice_const_u8_sentinel_0; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 85caec94902f..4fd91aded4bc 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -593,7 +593,6 @@ pub const DeclGen = struct { .extern_func => unreachable, // TODO else => { const result_id = dg.spv.allocId(); - log.debug("addDeclRef: id = {}, index = {}, name = {s}", .{ result_id.id, @enumToInt(spv_decl_index), decl.name }); try self.decl_deps.put(spv_decl_index, {}); @@ -664,9 +663,8 @@ pub const DeclGen = struct { => unreachable, // non-runtime values .int => try self.addInt(ty, val), .err => |err| { - const name = mod.intern_pool.stringToSlice(err.name); - const kv = try mod.getErrorValue(name); - try self.addConstInt(u16, @intCast(u16, kv.value)); + const int = try mod.getErrorValue(err.name); + try self.addConstInt(u16, @intCast(u16, int)); }, .error_union => |error_union| { const payload_ty = ty.errorUnionPayload(mod); @@ -1288,8 +1286,7 @@ pub const DeclGen = struct { member_index += 1; } - const name = try struct_obj.getFullyQualifiedName(self.module); - defer 
self.module.gpa.free(name); + const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(self.module)); return try self.spv.resolve(.{ .struct_type = .{ .name = try self.spv.resolveString(name), @@ -1500,7 +1497,6 @@ pub const DeclGen = struct { const spv_decl_index = try self.resolveDecl(self.decl_index); const decl_id = self.spv.declPtr(spv_decl_index).result_id; - log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); if (decl.val.getFunction(mod)) |_| { assert(decl.ty.zigTypeTag(mod) == .Fn); @@ -1542,8 +1538,7 @@ pub const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.getFullyQualifiedName(self.module); - defer self.module.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(self.module)); try self.spv.sections.debug_names.emit(self.gpa, .OpName, .{ .target = decl_id, diff --git a/src/link.zig b/src/link.zig index a44a7387e9e6..e43153f0b1fb 100644 --- a/src/link.zig +++ b/src/link.zig @@ -502,8 +502,6 @@ pub const File = struct { /// of the final binary. pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl_index: Module.Decl.Index) UpdateDeclError!u32 { if (build_options.only_c) @compileError("unreachable"); - const decl = base.options.module.?.declPtr(decl_index); - log.debug("lowerUnnamedConst {*} ({s})", .{ decl, decl.name }); switch (base.tag) { // zig fmt: off .coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl_index), @@ -543,7 +541,6 @@ pub const File = struct { /// May be called before or after updateDeclExports for any given Decl. pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void { const decl = module.declPtr(decl_index); - log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmt(module) }); assert(decl.has_tv); if (build_options.only_c) { assert(base.tag == .c); @@ -566,10 +563,6 @@ pub const File = struct { /// May be called before or after updateDeclExports for any given Decl. 
pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void { const func = module.funcPtr(func_index); - const owner_decl = module.declPtr(func.owner_decl); - log.debug("updateFunc {*} ({s}), type={}", .{ - owner_decl, owner_decl.name, owner_decl.ty.fmt(module), - }); if (build_options.only_c) { assert(base.tag == .c); return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness); @@ -590,9 +583,6 @@ pub const File = struct { pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void { const decl = module.declPtr(decl_index); - log.debug("updateDeclLineNumber {*} ({s}), line={}", .{ - decl, decl.name, decl.src_line + 1, - }); assert(decl.has_tv); if (build_options.only_c) { assert(base.tag == .c); @@ -868,7 +858,6 @@ pub const File = struct { exports: []const *Module.Export, ) UpdateDeclExportsError!void { const decl = module.declPtr(decl_index); - log.debug("updateDeclExports {*} ({s})", .{ decl, decl.name }); assert(decl.has_tv); if (build_options.only_c) { assert(base.tag == .c); diff --git a/src/link/C.zig b/src/link/C.zig index c871d8a02af9..8bfaf1553ce2 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -6,6 +6,7 @@ const fs = std.fs; const C = @This(); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); @@ -289,11 +290,11 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo } { - var export_names = std.StringHashMapUnmanaged(void){}; + var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; defer export_names.deinit(gpa); try export_names.ensureTotalCapacity(gpa, @intCast(u32, module.decl_exports.entries.len)); for (module.decl_exports.values()) |exports| for (exports.items) |@"export"| - try export_names.put(gpa, @"export".options.name, {}); + try export_names.put(gpa, @"export".name, {}); while (f.remaining_decls.popOrNull()) |kv| { const decl_index = kv.key; @@ -553,7 +554,7 @@ fn flushDecl( self: *C, f: *Flush, decl_index: Module.Decl.Index, - export_names: std.StringHashMapUnmanaged(void), + export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), ) FlushDeclError!void { const gpa = self.base.allocator; const mod = self.base.options.module.?; @@ -571,7 +572,7 @@ fn flushDecl( try self.flushLazyFns(f, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); - if (!(decl.isExtern(mod) and export_names.contains(mem.span(decl.name)))) + if (!(decl.isExtern(mod) and export_names.contains(decl.name))) f.appendBufAssumeCapacity(decl_block.fwd_decl.items); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 8b76e8dd69a7..fec6a86b9133 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1097,8 +1097,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In const atom_index = try self.createAtom(); const sym_name = blk: { - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const index = unnamed_consts.items.len; break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index }); @@ -1324,12 +1323,10 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { } fn updateDeclCode(self: *Coff, 
decl_index: Module.Decl.Index, code: []u8, complex_type: coff.ComplexType) !void { - const gpa = self.base.allocator; const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); const required_alignment = decl.getAlignment(mod); @@ -1420,6 +1417,8 @@ pub fn updateDeclExports( @panic("Attempted to compile for object format that was disabled by build configuration"); } + const ip = &mod.intern_pool; + if (build_options.have_llvm) { // Even in the case of LLVM, we need to notice certain exported symbols in order to // detect the default subsystem. @@ -1431,20 +1430,20 @@ pub fn updateDeclExports( else => std.builtin.CallingConvention.C, }; const decl_cc = exported_decl.ty.fnCallingConvention(mod); - if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and + if (decl_cc == .C and ip.stringEqlSlice(exp.name, "main") and self.base.options.link_libc) { mod.stage1_flags.have_c_main = true; } else if (decl_cc == winapi_cc and self.base.options.target.os.tag == .windows) { - if (mem.eql(u8, exp.options.name, "WinMain")) { + if (ip.stringEqlSlice(exp.name, "WinMain")) { mod.stage1_flags.have_winmain = true; - } else if (mem.eql(u8, exp.options.name, "wWinMain")) { + } else if (ip.stringEqlSlice(exp.name, "wWinMain")) { mod.stage1_flags.have_wwinmain = true; - } else if (mem.eql(u8, exp.options.name, "WinMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.name, "WinMainCRTStartup")) { mod.stage1_flags.have_winmain_crt_startup = true; - } else if (mem.eql(u8, exp.options.name, "wWinMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.name, "wWinMainCRTStartup")) { mod.stage1_flags.have_wwinmain_crt_startup = true; - } else if (mem.eql(u8, exp.options.name, "DllMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.name, "DllMainCRTStartup")) { mod.stage1_flags.have_dllmain_crt_startup = true; } } @@ -1453,9 +1452,6 @@ pub fn updateDeclExports( if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports); } - const tracy = trace(@src()); - defer tracy.end(); - const gpa = self.base.allocator; const decl = mod.declPtr(decl_index); @@ -1465,12 +1461,13 @@ pub fn updateDeclExports( const decl_metadata = self.decls.getPtr(decl_index).?; for (exports) |exp| { - log.debug("adding new export '{s}'", .{exp.options.name}); + const exp_name = mod.intern_pool.stringToSlice(exp.name); + log.debug("adding new export '{s}'", .{exp_name}); - if (exp.options.section) |section_name| { + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { if (!mem.eql(u8, section_name, ".text")) { try mod.failed_exports.putNoClobber( - mod.gpa, + gpa, exp, try Module.ErrorMsg.create( gpa, @@ -1483,9 +1480,9 @@ pub fn updateDeclExports( } } - if (exp.options.linkage == .LinkOnce) { + if (exp.linkage == .LinkOnce) { try mod.failed_exports.putNoClobber( - mod.gpa, + gpa, exp, try Module.ErrorMsg.create( gpa, @@ -1497,19 +1494,19 @@ pub fn updateDeclExports( continue; } - const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: { + const sym_index = decl_metadata.getExport(self, exp_name) orelse blk: { const sym_index = try self.allocateSymbol(); try decl_metadata.exports.append(gpa, sym_index); break :blk sym_index; }; const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; const sym = 
self.getSymbolPtr(sym_loc); - try self.setSymbolName(sym, exp.options.name); + try self.setSymbolName(sym, exp_name); sym.value = decl_sym.value; sym.section_number = @intToEnum(coff.SectionNumber, self.text_section_index.? + 1); sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL }; - switch (exp.options.linkage) { + switch (exp.linkage) { .Strong => { sym.storage_class = .EXTERNAL; }, @@ -1522,9 +1519,15 @@ pub fn updateDeclExports( } } -pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void { +pub fn deleteDeclExport( + self: *Coff, + decl_index: Module.Decl.Index, + name_ip: InternPool.NullTerminatedString, +) void { if (self.llvm_object) |_| return; const metadata = self.decls.getPtr(decl_index) orelse return; + const mod = self.base.options.module.?; + const name = mod.intern_pool.stringToSlice(name_ip); const sym_index = metadata.getExportPtr(self, name) orelse return; const gpa = self.base.allocator; @@ -2540,6 +2543,7 @@ const ImportTable = @import("Coff/ImportTable.zig"); const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Object = @import("Coff/Object.zig"); const Relocation = @import("Coff/Relocation.zig"); const TableSection = @import("table_section.zig").TableSection; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 9d8076f59299..b9b7772260bd 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -358,8 +358,9 @@ pub const DeclState = struct { struct_obj.fields.keys(), struct_obj.fields.values(), 0.., - ) |field_name, field, field_index| { + ) |field_name_ip, field, field_index| { if (!field.ty.hasRuntimeBits(mod)) continue; + const field_name = mod.intern_pool.stringToSlice(field_name_ip); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -469,7 +470,8 @@ pub const DeclState = struct { // DW.AT.member try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{s}\x00", .{field_name}); + try dbg_info_buffer.appendSlice(mod.intern_pool.stringToSlice(field_name)); + try dbg_info_buffer.append(0); // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); @@ -949,8 +951,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) defer tracy.end(); const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("initDeclState {s}{*}", .{ decl_name, decl }); @@ -1273,7 +1274,6 @@ pub fn commitDeclState( } } - log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name}); try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len)); while (decl_state.abbrev_relocs.popOrNull()) |reloc| { @@ -1345,7 +1345,6 @@ pub fn commitDeclState( } } - log.debug("writeDeclDebugInfo for '{s}", .{decl.name}); try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); } @@ -2523,15 +2522,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { // TODO: don't create a zig type for this, just make the dwarf info // without touching the zig type system. 
- const names = try arena.alloc(InternPool.NullTerminatedString, module.global_error_set.count()); - { - var it = module.global_error_set.keyIterator(); - var i: usize = 0; - while (it.next()) |key| : (i += 1) { - names[i] = module.intern_pool.getString(key.*).unwrap().?; - } - } - + const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys()); std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan); const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } }); @@ -2682,8 +2673,8 @@ fn addDbgInfoErrorSet( const error_names = ty.errorSetNames(mod); for (error_names) |error_name_ip| { + const int = try mod.getErrorValue(error_name_ip); const error_name = mod.intern_pool.stringToSlice(error_name_ip); - const kv = mod.getErrorValue(error_name) catch unreachable; // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64)); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant)); @@ -2691,7 +2682,7 @@ fn addDbgInfoErrorSet( dbg_info_buffer.appendSliceAssumeCapacity(error_name); dbg_info_buffer.appendAssumeCapacity(0); // DW.AT.const_value, DW.FORM.data8 - mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), kv.value, target_endian); + mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), int, target_endian); } // DW.AT.enumeration_type delimit children diff --git a/src/link/Elf.zig b/src/link/Elf.zig index e4fa07620db1..5ac90d4cae17 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -28,6 +28,7 @@ const File = link.File; const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); const StringTable = @import("strtab.zig").StringTable; const TableSection = @import("table_section.zig").TableSection; @@ -2480,8 +2481,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); const required_alignment = decl.getAlignment(mod); @@ -2802,8 +2802,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module const decl = mod.declPtr(decl_index); const name_str_index = blk: { - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index }); defer gpa.free(name); @@ -2880,7 +2879,8 @@ pub fn updateDeclExports( try self.global_symbols.ensureUnusedCapacity(gpa, exports.len); for (exports) |exp| { - if (exp.options.section) |section_name| { + const exp_name = mod.intern_pool.stringToSlice(exp.name); + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { if (!mem.eql(u8, section_name, ".text")) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber( @@ -2890,11 +2890,11 @@ pub fn updateDeclExports( continue; } } - const stb_bits: u8 = switch (exp.options.linkage) { + const stb_bits: u8 = 
switch (exp.linkage) { .Internal => elf.STB_LOCAL, .Strong => blk: { const entry_name = self.base.options.entry orelse "_start"; - if (mem.eql(u8, exp.options.name, entry_name)) { + if (mem.eql(u8, exp_name, entry_name)) { self.entry_addr = decl_sym.st_value; } break :blk elf.STB_GLOBAL; @@ -2910,10 +2910,10 @@ pub fn updateDeclExports( }, }; const stt_bits: u8 = @truncate(u4, decl_sym.st_info); - if (decl_metadata.getExport(self, exp.options.name)) |i| { + if (decl_metadata.getExport(self, exp_name)) |i| { const sym = &self.global_symbols.items[i]; sym.* = .{ - .st_name = try self.shstrtab.insert(gpa, exp.options.name), + .st_name = try self.shstrtab.insert(gpa, exp_name), .st_info = (stb_bits << 4) | stt_bits, .st_other = 0, .st_shndx = shdr_index, @@ -2927,7 +2927,7 @@ pub fn updateDeclExports( }; try decl_metadata.exports.append(gpa, @intCast(u32, i)); self.global_symbols.items[i] = .{ - .st_name = try self.shstrtab.insert(gpa, exp.options.name), + .st_name = try self.shstrtab.insert(gpa, exp_name), .st_info = (stb_bits << 4) | stt_bits, .st_other = 0, .st_shndx = shdr_index, @@ -2944,8 +2944,7 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.In defer tracy.end(); const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl }); @@ -2955,11 +2954,15 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.In } } -pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void { +pub fn deleteDeclExport( + self: *Elf, + decl_index: Module.Decl.Index, + name: InternPool.NullTerminatedString, +) void { if (self.llvm_object) |_| return; const metadata = self.decls.getPtr(decl_index) orelse return; - const sym_index = metadata.getExportPtr(self, name) orelse return; - log.debug("deleting export '{s}'", .{name}); + const mod = self.base.options.module.?; + const sym_index = metadata.getExportPtr(self, mod.intern_pool.stringToSlice(name)) orelse return; self.global_symbol_free_list.append(self.base.allocator, sym_index.*) catch {}; self.global_symbols.items[sym_index.*].st_info = 0; sym_index.* = 0; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index f7f975f920f3..70993e8dc6d8 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -40,6 +40,7 @@ const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Md5 = std.crypto.hash.Md5; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Relocation = @import("MachO/Relocation.zig"); const StringTable = @import("strtab.zig").StringTable; const TableSection = @import("table_section.zig").TableSection; @@ -1921,8 +1922,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const name_str_index = blk: { const index = unnamed_consts.items.len; @@ -2206,8 +2206,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const required_alignment = decl.getAlignment(mod); - const decl_name = try decl.getFullyQualifiedName(module); - defer 
gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(module)); const init_sym_name = try std.fmt.allocPrint(gpa, "{s}$tlv$init", .{decl_name}); defer gpa.free(init_sym_name); @@ -2306,8 +2305,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64 const required_alignment = decl.getAlignment(mod); - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; @@ -2403,12 +2401,14 @@ pub fn updateDeclExports( const decl_metadata = self.decls.getPtr(decl_index).?; for (exports) |exp| { - const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name}); + const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{ + mod.intern_pool.stringToSlice(exp.name), + }); defer gpa.free(exp_name); log.debug("adding new export '{s}'", .{exp_name}); - if (exp.options.section) |section_name| { + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { if (!mem.eql(u8, section_name, "__text")) { try mod.failed_exports.putNoClobber( mod.gpa, @@ -2424,7 +2424,7 @@ pub fn updateDeclExports( } } - if (exp.options.linkage == .LinkOnce) { + if (exp.linkage == .LinkOnce) { try mod.failed_exports.putNoClobber( mod.gpa, exp, @@ -2453,7 +2453,7 @@ pub fn updateDeclExports( .n_value = decl_sym.n_value, }; - switch (exp.options.linkage) { + switch (exp.linkage) { .Internal => { // Symbol should be hidden, or in MachO lingo, private extern. // We should also mark the symbol as Weak: n_desc == N_WEAK_DEF. @@ -2488,12 +2488,17 @@ pub fn updateDeclExports( } } -pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void { +pub fn deleteDeclExport( + self: *MachO, + decl_index: Module.Decl.Index, + name: InternPool.NullTerminatedString, +) Allocator.Error!void { if (self.llvm_object) |_| return; const metadata = self.decls.getPtr(decl_index) orelse return; const gpa = self.base.allocator; - const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name}); + const mod = self.base.options.module.?; + const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{mod.intern_pool.stringToSlice(name)}); defer gpa.free(exp_name); const sym_index = metadata.getExportPtr(self, exp_name) orelse return; diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 0803b6beef2f..5cf2add528d5 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -287,7 +287,6 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: self.freeUnnamedConsts(decl_index); _ = try self.seeDecl(decl_index); - log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -345,8 +344,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const index = unnamed_consts.items.len; // name is freed when the unnamed const is freed @@ -403,8 +401,6 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo _ = try self.seeDecl(decl_index); - log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index }); - var code_buffer = 
std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; @@ -435,7 +431,6 @@ fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); const is_fn = (decl.ty.zigTypeTag(mod) == .Fn); - log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); const sym_t: aout.Sym.Type = if (is_fn) .t else .d; const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); @@ -446,7 +441,7 @@ fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { const sym: aout.Sym = .{ .value = undefined, // the value of stuff gets filled in in flushModule .type = decl_block.type, - .name = mem.span(decl.name), + .name = mod.intern_pool.stringToSlice(decl.name), }; if (decl_block.sym_index) |s| { @@ -567,10 +562,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = fentry.value_ptr.functions.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl = mod.declPtr(decl_index); const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); const out = entry.value_ptr.*; - log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line }); { // connect the previous decl to the next const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount); @@ -616,10 +609,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = self.data_decl_table.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl = mod.declPtr(decl_index); const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); const code = entry.value_ptr.*; - log.debug("write data decl {*} ({s})", .{ decl, decl.name }); foff += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; @@ -695,15 +686,12 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const source_decl = mod.declPtr(source_decl_index); for (kv.value_ptr.items) |reloc| { const target_decl_index = reloc.target; - const target_decl = mod.declPtr(target_decl_index); const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index); const target_decl_offset = target_decl_block.offset.?; const offset = reloc.offset; const addend = reloc.addend; - log.debug("relocating the address of '{s}' + {d} into '{s}' + {d}", .{ target_decl.name, addend, source_decl.name, offset }); - const code = blk: { const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; if (is_fn) { @@ -737,8 +725,9 @@ fn addDeclExports( const decl_block = self.getDeclBlock(metadata.index); for (exports) |exp| { + const exp_name = mod.intern_pool.stringToSlice(exp.name); // plan9 does not support custom sections - if (exp.options.section) |section_name| { + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( self.base.allocator, @@ -752,10 +741,10 @@ fn addDeclExports( const sym = .{ .value = decl_block.offset.?, .type = decl_block.type.toGlobal(), - .name = exp.options.name, + .name = exp_name, }; - if (metadata.getExport(self, exp.options.name)) |i| { + if (metadata.getExport(self, exp_name)) |i| { self.syms.items[i] = sym; } else 
{ try self.syms.append(self.base.allocator, sym); @@ -956,7 +945,10 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void { try w.writeAll(sym.name); try w.writeByte(0); } + pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { + const mod = self.base.options.module.?; + const ip = &mod.intern_pool; const writer = buf.writer(); // write the f symbols { @@ -980,7 +972,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const sym = self.syms.items[decl_block.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| { + for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.name))) |exp_i| { try self.writeSym(writer, self.syms.items[exp_i]); }; } @@ -1006,7 +998,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const sym = self.syms.items[decl_block.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| { + for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.name))) |exp_i| { const s = self.syms.items[exp_i]; if (mem.eql(u8, s.name, "_start")) self.entry_val = s.value; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 89d6be1ec8da..fbde464c5457 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -147,7 +147,7 @@ pub fn updateDeclExports( const spv_decl_index = entry.value_ptr.*; for (exports) |exp| { - try self.spv.declareEntryPoint(spv_decl_index, exp.options.name); + try self.spv.declareEntryPoint(spv_decl_index, mod.intern_pool.stringToSlice(exp.name)); } } @@ -190,7 +190,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No var error_info = std.ArrayList(u8).init(self.spv.arena); try error_info.appendSlice("zig_errors"); const module = self.base.options.module.?; - for (module.error_name_list.items) |name| { + for (module.global_error_set.keys()) |name_nts| { + const name = module.intern_pool.stringToSlice(name_nts); // Errors can contain pretty much any character - to encode them in a string we must escape // them somehow. Easiest here is to use some established scheme, one which also preserves the // name if it contains no strange characters is nice for debugging. URI encoding fits the bill.
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 96de121ffb4f..d57543542a7c 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1416,7 +1416,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi if (decl.isExtern(mod)) { const variable = decl.getOwnedVariable(mod).?; - const name = mem.sliceTo(decl.name, 0); + const name = mod.intern_pool.stringToSlice(decl.name); const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name); return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null); } @@ -1453,8 +1453,7 @@ pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.I defer tracy.end(); const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl }); try dw.updateDeclLineNumber(mod, decl_index); @@ -1467,8 +1466,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 const atom_index = wasm.decls.get(decl_index).?; const atom = wasm.getAtomPtr(atom_index); const symbol = &wasm.symbols.items[atom.sym_index]; - const full_name = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(full_name); + const full_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); symbol.name = try wasm.string_table.put(wasm.base.allocator, full_name); try atom.code.appendSlice(wasm.base.allocator, code); try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {}); @@ -1535,9 +1533,10 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const parent_atom = wasm.getAtomPtr(parent_atom_index); const local_index = parent_atom.locals.items.len; try parent_atom.locals.append(wasm.base.allocator, atom_index); - const fqdn = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(fqdn); - const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index }); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); + const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ + fqn, local_index, + }); defer wasm.base.allocator.free(name); var value_bytes = std.ArrayList(u8).init(wasm.base.allocator); defer value_bytes.deinit(); @@ -1690,11 +1689,12 @@ pub fn updateDeclExports( const decl = mod.declPtr(decl_index); const atom_index = try wasm.getOrCreateAtomForDecl(decl_index); const atom = wasm.getAtom(atom_index); + const gpa = mod.gpa; for (exports) |exp| { - if (exp.options.section) |section| { - try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section| { + try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + gpa, decl.srcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", .{section}, @@ -1702,24 +1702,24 @@ pub fn updateDeclExports( continue; } - const export_name = try wasm.string_table.put(wasm.base.allocator, exp.options.name); + const export_name = try wasm.string_table.put(wasm.base.allocator, mod.intern_pool.stringToSlice(exp.name)); if (wasm.globals.getPtr(export_name)) |existing_loc| { if (existing_loc.index == atom.sym_index) continue; const existing_sym: Symbol = existing_loc.getSymbol(wasm).*; - const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == 
.Weak; + const exp_is_weak = exp.linkage == .Internal or exp.linkage == .Weak; // When both the to-be-exported symbol and the already existing symbol // are strong symbols, we have a linker error. // In the other case we replace one with the other. if (!exp_is_weak and !existing_sym.isWeak()) { - try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, + try mod.failed_exports.put(gpa, exp, try Module.ErrorMsg.create( + gpa, decl.srcLoc(mod), \\LinkError: symbol '{s}' defined multiple times \\ first definition in '{s}' \\ next definition in '{s}' , - .{ exp.options.name, wasm.name, wasm.name }, + .{ mod.intern_pool.stringToSlice(exp.name), wasm.name, wasm.name }, )); continue; } else if (exp_is_weak) { @@ -1736,7 +1736,7 @@ pub fn updateDeclExports( const exported_atom = wasm.getAtom(exported_atom_index); const sym_loc = exported_atom.symbolLoc(); const symbol = sym_loc.getSymbol(wasm); - switch (exp.options.linkage) { + switch (exp.linkage) { .Internal => { symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); }, @@ -1745,8 +1745,8 @@ pub fn updateDeclExports( }, .Strong => {}, // symbols are strong by default .LinkOnce => { - try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, + try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + gpa, decl.srcLoc(mod), "Unimplemented: LinkOnce", .{}, @@ -1755,7 +1755,7 @@ pub fn updateDeclExports( }, } // Ensure the symbol will be exported using the given name - if (!mem.eql(u8, exp.options.name, sym_loc.getName(wasm))) { + if (!mod.intern_pool.stringEqlSlice(exp.name, sym_loc.getName(wasm))) { try wasm.export_names.put(wasm.base.allocator, sym_loc, export_name); } @@ -1769,7 +1769,7 @@ pub fn updateDeclExports( // if the symbol was previously undefined, remove it as an import _ = wasm.imports.remove(sym_loc); - _ = wasm.undefs.swapRemove(exp.options.name); + _ = wasm.undefs.swapRemove(mod.intern_pool.stringToSlice(exp.name)); } } @@ -2987,7 +2987,8 @@ fn populateErrorNameTable(wasm: *Wasm) !void { // Addend for each relocation to the table var addend: u32 = 0; const mod = wasm.base.options.module.?; - for (mod.error_name_list.items) |error_name| { + for (mod.global_error_set.keys()) |error_name_nts| { + const error_name = mod.intern_pool.stringToSlice(error_name_nts); const len = @intCast(u32, error_name.len + 1); // names are 0-terminated const slice_ty = Type.slice_const_u8_sentinel_0; diff --git a/src/print_air.zig b/src/print_air.zig index 8da80e1360cc..eb104772920a 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -685,8 +685,9 @@ const Writer = struct { fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_fn = w.air.instructions.items(.data)[inst].ty_fn; const func_index = ty_fn.func; + const ip = &w.module.intern_pool; const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl); - try s.print("{s}", .{owner_decl.name}); + try s.print("{s}", .{ip.stringToSlice(owner_decl.name)}); } fn writeDbgVar(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { diff --git a/src/type.zig b/src/type.zig index 61c9377b1dad..43aaf3c78692 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2546,7 +2546,7 @@ pub const Type = struct { defer mod.gpa.free(field_vals); for (field_vals, s.fields.values()) |*field_val, field| { if (field.is_comptime) { - field_val.* = try field.default_val.intern(field.ty, mod); + field_val.* = field.default_val; continue; } if (try field.ty.onePossibleValue(mod)) |field_opv| { @@
-2977,18 +2977,14 @@ pub const Type = struct { return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names.len; } - pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) [:0]const u8 { - const ip = &mod.intern_pool; - const field_name = ip.indexToKey(ty.toIntern()).enum_type.names[field_index]; - return ip.stringToSlice(field_name); + pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString { + return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names[field_index]; } - pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?u32 { + pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 { const ip = &mod.intern_pool; const enum_type = ip.indexToKey(ty.toIntern()).enum_type; - // If the string is not interned, then the field certainly is not present. - const field_name_interned = ip.getString(field_name).unwrap() orelse return null; - return enum_type.nameIndex(ip, field_name_interned); + return enum_type.nameIndex(ip, field_name); } /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or @@ -3017,19 +3013,16 @@ pub const Type = struct { } } - pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 { - switch (mod.intern_pool.indexToKey(ty.toIntern())) { + pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.haveFieldTypes()); return struct_obj.fields.keys()[field_index]; }, - .anon_struct_type => |anon_struct| { - const name = anon_struct.names[field_index]; - return mod.intern_pool.stringToSlice(name); - }, + .anon_struct_type => |anon_struct| anon_struct.names[field_index], else => unreachable, - } + }; } pub fn structFieldCount(ty: Type, mod: *Module) usize { @@ -3082,7 +3075,10 @@ pub const Type = struct { switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - return struct_obj.fields.values()[index].default_val; + const val = struct_obj.fields.values()[index].default_val; + // TODO: avoid using `unreachable` to indicate this. + if (val == .none) return Value.@"unreachable"; + return val.toValue(); }, .anon_struct_type => |anon_struct| { const val = anon_struct.values[index]; @@ -3100,7 +3096,7 @@ pub const Type = struct { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; const field = struct_obj.fields.values()[index]; if (field.is_comptime) { - return field.default_val; + return field.default_val.toValue(); } else { return field.ty.onePossibleValue(mod); } diff --git a/src/value.zig b/src/value.zig index 6a19678d7152..2c38852bf575 100644 --- a/src/value.zig +++ b/src/value.zig @@ -24,9 +24,6 @@ pub const Value = struct { /// This union takes advantage of the fact that the first page of memory /// is unmapped, giving us 4096 possible enum tags that have no payload. legacy: extern union { - /// If the tag value is less than Tag.no_payload_count, then no pointer - /// dereference is needed. - tag_if_small_enough: Tag, ptr_otherwise: *Payload, }, @@ -64,8 +61,6 @@ pub const Value = struct { /// An instance of a union. 
@"union", - pub const no_payload_count = 0; - pub fn Type(comptime t: Tag) type { return switch (t) { .eu_payload, @@ -96,16 +91,7 @@ pub const Value = struct { } }; - pub fn initTag(small_tag: Tag) Value { - assert(@enumToInt(small_tag) < Tag.no_payload_count); - return Value{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = small_tag }, - }; - } - pub fn initPayload(payload: *Payload) Value { - assert(@enumToInt(payload.tag) >= Tag.no_payload_count); return Value{ .ip_index = .none, .legacy = .{ .ptr_otherwise = payload }, @@ -114,11 +100,7 @@ pub const Value = struct { pub fn tag(self: Value) Tag { assert(self.ip_index == .none); - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return self.legacy.tag_if_small_enough; - } else { - return self.legacy.ptr_otherwise.tag; - } + return self.legacy.ptr_otherwise.tag; } /// Prefer `castTag` to this. @@ -129,12 +111,7 @@ pub const Value = struct { if (@hasField(T, "base_tag")) { return self.castTag(T.base_tag); } - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return null; - } inline for (@typeInfo(Tag).Enum.fields) |field| { - if (field.value < Tag.no_payload_count) - continue; const t = @intToEnum(Tag, field.value); if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { @@ -149,9 +126,6 @@ pub const Value = struct { pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { if (self.ip_index != .none) return null; - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) - return null; - if (self.legacy.ptr_otherwise.tag == t) return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); @@ -164,12 +138,7 @@ pub const Value = struct { if (self.ip_index != .none) { return Value{ .ip_index = self.ip_index, .legacy = undefined }; } - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return Value{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, - }; - } else switch (self.legacy.ptr_otherwise.tag) { + switch (self.legacy.ptr_otherwise.tag) { .bytes => { const bytes = self.castTag(.bytes).?.data; const new_payload = try arena.create(Payload.Bytes); @@ -312,6 +281,30 @@ pub const Value = struct { } }; } + /// Asserts that the value is representable as an array of bytes. + /// Returns the value as a null-terminated string stored in the InternPool. + pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| enum_literal, + .ptr => |ptr| switch (ptr.len) { + .none => unreachable, + else => try arrayToIpString(val, ptr.len.toValue().toUnsignedInt(mod), mod), + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes), + .elems => try arrayToIpString(val, ty.arrayLen(mod), mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const len = @intCast(usize, ty.arrayLen(mod)); + try ip.string_bytes.appendNTimes(mod.gpa, byte, len); + return ip.getOrPutTrailingString(mod.gpa, len); + }, + }, + else => unreachable, + }; + } + /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { @@ -319,11 +312,11 @@ pub const Value = struct { .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), .ptr => |ptr| switch (ptr.len) { .none => unreachable, - else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + else => try arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), }, .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try allocator.dupe(u8, bytes), - .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), .repeated_elem => |elem| { const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); @@ -344,6 +337,23 @@ pub const Value = struct { return result; } + fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const len = @intCast(usize, len_u64); + try ip.string_bytes.ensureUnusedCapacity(gpa, len); + for (0..len) |i| { + // I don't think elemValue has the possibility to affect ip.string_bytes. Let's + // assert just to be sure. + const prev = ip.string_bytes.items.len; + const elem_val = try val.elemValue(mod, i); + assert(ip.string_bytes.items.len == prev); + const byte = @intCast(u8, elem_val.toUnsignedInt(mod)); + ip.string_bytes.appendAssumeCapacity(byte); + } + return ip.getOrPutTrailingString(gpa, len); + } + pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern(); switch (val.tag()) { @@ -498,7 +508,7 @@ pub const Value = struct { // Assume it is already an integer and return it directly. .simple_type, .int_type => val, .enum_literal => |enum_literal| { - const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; + const field_index = ty.enumFieldIndex(enum_literal, mod).?; return switch (ip.indexToKey(ty.toIntern())) { // Assume it is already an integer and return it directly. .simple_type, .int_type => val, @@ -776,7 +786,7 @@ pub const Value = struct { .error_union => |error_union| error_union.val.err_name, else => unreachable, }; - const int = mod.global_error_set.get(mod.intern_pool.stringToSlice(name)).?; + const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); }, .Union => switch (ty.containerLayout(mod)) { @@ -1028,10 +1038,10 @@ pub const Value = struct { // TODO revisit this when we have the concept of the error tag type const Int = u16; const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); - const name = mod.error_name_list.items[@intCast(usize, int)]; + const name = mod.global_error_set.keys()[@intCast(usize, int)]; return (try mod.intern(.{ .err = .{ .ty = ty.toIntern(), - .name = mod.intern_pool.getString(name).unwrap().?, + .name = name, } })).toValue(); }, .Pointer => { @@ -2155,15 +2165,29 @@ pub const Value = struct { /// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether /// something is an error or not because it works without having to figure out the /// string. 
- pub fn getError(self: Value, mod: *const Module) ?[]const u8 { - return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.toIntern())) { - .err => |err| err.name.toOptional(), + pub fn getError(val: Value, mod: *const Module) ?[]const u8 { + return switch (getErrorName(val, mod)) { + .empty => null, + else => |s| mod.intern_pool.stringToSlice(s), + }; + } + + pub fn getErrorName(val: Value, mod: *const Module) InternPool.NullTerminatedString { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .err => |err| err.name, .error_union => |error_union| switch (error_union.val) { - .err_name => |err_name| err_name.toOptional(), - .payload => .none, + .err_name => |err_name| err_name, + .payload => .empty, }, else => unreachable, - }); + }; + } + + pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt { + return switch (getErrorName(val, mod)) { + .empty => 0, + else => |s| @intCast(Module.ErrorInt, mod.global_error_set.getIndex(s).?), + }; } /// Assumes the type is an error union. Returns true if and only if the value is @@ -4225,7 +4249,7 @@ pub const Value = struct { var fields: [tags.len]std.builtin.Type.StructField = undefined; for (&fields, tags) |*field, t| field.* = .{ .name = t.name, - .type = *if (t.value < Tag.no_payload_count) void else @field(Tag, t.name).Type(), + .type = *@field(Tag, t.name).Type(), .default_value = null, .is_comptime = false, .alignment = 0, From f1c900c72e0d941fb2ab87197485c710fc95450b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Jun 2023 00:33:28 -0700 Subject: [PATCH 168/205] compiler: avoid use of undefined memory InternPool is nice in some ways but it also comes with its own set of footguns. This commit fixes 5 instances. I see quite a few Valgrind warnings remaining when running the behavior tests. Perhaps the solution is to have stringToSlice return a struct with start and length as indexes, which has a format function? --- src/Module.zig | 33 +++++++++++++++++++++++++++------ src/Sema.zig | 5 ++++- 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 1e75ab037d6e..435577cf965a 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -576,7 +576,6 @@ pub const Decl = struct { } mod.destroyFunc(func); } - _ = mod.memoized_decls.remove(decl.val.ip_index); } /// This name is relative to the containing namespace of the decl. @@ -690,10 +689,30 @@ pub const Decl = struct { } pub fn getFullyQualifiedName(decl: Decl, mod: *Module) !InternPool.NullTerminatedString { - const gpa = mod.gpa; + if (decl.name_fully_qualified) return decl.name; + const ip = &mod.intern_pool; + const count = count: { + var count: usize = ip.stringToSlice(decl.name).len + 1; + var ns: Namespace.Index = decl.src_namespace; + while (true) { + const namespace = mod.namespacePtr(ns); + const ns_decl_index = namespace.getDeclIndex(mod); + const ns_decl = mod.declPtr(ns_decl_index); + count += ip.stringToSlice(ns_decl.name).len + 1; + ns = namespace.parent.unwrap() orelse { + count += namespace.file_scope.sub_file_path.len; + break :count count; + }; + } + }; + + const gpa = mod.gpa; const start = ip.string_bytes.items.len; - try decl.renderFullyQualifiedName(mod, ip.string_bytes.writer(gpa)); + // Protects reads of interned strings from being reallocated during the call to + // renderFullyQualifiedName. 
+ try ip.string_bytes.ensureUnusedCapacity(gpa, count); + decl.renderFullyQualifiedName(mod, ip.string_bytes.writer(gpa)) catch unreachable; // Sanitize the name for nvptx which is more restrictive. // TODO This should be handled by the backend, not the frontend. Have a @@ -4018,7 +4037,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { .unreferenced => false, }; - var decl_prog_node = mod.sema_prog_node.start(mod.intern_pool.stringToSlice(decl.name), 0); + var decl_prog_node = mod.sema_prog_node.start("", 0); decl_prog_node.activate(); defer decl_prog_node.end(); @@ -5774,9 +5793,11 @@ pub fn createAnonymousDeclFromDecl( const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope); errdefer mod.destroyDecl(new_decl_index); const ip = &mod.intern_pool; - const name = try ip.getOrPutStringFmt(mod.gpa, "{s}__anon_{d}", .{ + // This protects the getOrPutStringFmt from reallocating src decl name while reading it. + try ip.string_bytes.ensureUnusedCapacity(mod.gpa, ip.stringToSlice(src_decl.name).len + 20); + const name = ip.getOrPutStringFmt(mod.gpa, "{s}__anon_{d}", .{ ip.stringToSlice(src_decl.name), @enumToInt(new_decl_index), - }); + }) catch unreachable; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); return new_decl_index; } diff --git a/src/Sema.zig b/src/Sema.zig index da8878ed4d59..3585c106eb55 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -17065,6 +17065,7 @@ fn typeInfoNamespaceDecls( seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { const mod = sema.mod; + const ip = &mod.intern_pool; const gop = try seen_namespaces.getOrPut(namespace); if (gop.found_existing) return; const decls = namespace.decls.keys(); @@ -17081,7 +17082,9 @@ fn typeInfoNamespaceDecls( const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = mod.intern_pool.stringToSlice(decl.name); + // Protects the decl name slice from being invalidated at the call to intern(). 
+ try ip.string_bytes.ensureUnusedCapacity(sema.gpa, ip.stringToSlice(decl.name).len + 1); + const name = ip.stringToSlice(decl.name); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, From 35550c840b07df017b360c81c3cd0a8a3da55aae Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Jun 2023 00:53:33 -0700 Subject: [PATCH 169/205] Module: fix populateTestFunctions UAF --- src/Module.zig | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 435577cf965a..1b64f9f72eae 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6322,11 +6322,12 @@ pub fn populateTestFunctions( main_progress_node: *std.Progress.Node, ) !void { const gpa = mod.gpa; + const ip = &mod.intern_pool; const builtin_pkg = mod.main_pkg.table.get("builtin").?; const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); const builtin_namespace = mod.namespacePtr(root_decl.src_namespace); - const test_functions_str = try mod.intern_pool.getOrPutString(gpa, "test_functions"); + const test_functions_str = try ip.getOrPutString(gpa, "test_functions"); const decl_index = builtin_namespace.decls.getKeyAdapted( test_functions_str, DeclAdapter{ .mod = mod }, @@ -6362,7 +6363,9 @@ pub fn populateTestFunctions( for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - const test_decl_name = mod.intern_pool.stringToSlice(test_decl.name); + // Protects test_decl_name from being invalidated during call to intern() below. + try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(test_decl.name).len + 10); + const test_decl_name = ip.stringToSlice(test_decl.name); const test_name_decl_index = n: { const test_name_decl_ty = try mod.arrayType(.{ .len = test_decl_name.len, @@ -6444,7 +6447,7 @@ pub fn populateTestFunctions( .addr = .{ .decl = array_decl_index }, .len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(), } }); - mod.intern_pool.mutateVarInit(decl.val.toIntern(), new_init); + ip.mutateVarInit(decl.val.toIntern(), new_init); // Since we are replacing the Decl's value we must perform cleanup on the // previous value. 
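
The two fixes above share a single root cause: `InternPool.stringToSlice` returns a slice pointing directly into `intern_pool.string_bytes`, so any later operation that can append to `string_bytes` (interning a string, a value, or a fully qualified name) may reallocate the buffer and leave the slice dangling. The mitigation used in these patches is to call `ensureUnusedCapacity` for the worst-case number of bytes before taking the slice, so the appends that follow are guaranteed not to reallocate. Below is a minimal, self-contained sketch of the hazard and the fix; the `Pool` type and its `put`/`toSlice`/`dupeString` functions are hypothetical stand-ins for illustration, not the real `InternPool` API.

    const std = @import("std");

    /// Hypothetical stand-in for InternPool: all strings live contiguously
    /// in `bytes`, and callers receive slices that point into that buffer.
    const Pool = struct {
        bytes: std.ArrayListUnmanaged(u8) = .{},

        /// Appends `s` plus a 0 terminator; returns the string's offset.
        fn put(pool: *Pool, gpa: std.mem.Allocator, s: []const u8) !u32 {
            const start = @intCast(u32, pool.bytes.items.len);
            try pool.bytes.appendSlice(gpa, s);
            try pool.bytes.append(gpa, 0);
            return start;
        }

        /// Returns a slice into `bytes`; invalidated by any reallocation.
        fn toSlice(pool: *const Pool, start: u32) []const u8 {
            return std.mem.sliceTo(pool.bytes.items[start..], 0);
        }
    };

    /// Interns a fresh copy of the string stored at `start`.
    fn dupeString(pool: *Pool, gpa: std.mem.Allocator, start: u32) !u32 {
        // UNSOUND version: `put` may grow `bytes`, after which `name`
        // dangles and the copy reads freed memory:
        //   const name = pool.toSlice(start);
        //   return pool.put(gpa, name);

        // Sound version, following the pattern in the patches above:
        // reserve worst-case capacity first, then take the slice; the
        // appends inside `put` can no longer reallocate while `name` lives.
        try pool.bytes.ensureUnusedCapacity(gpa, pool.toSlice(start).len + 1);
        const name = pool.toSlice(start);
        return pool.put(gpa, name) catch unreachable; // capacity reserved above
    }

The idea floated in the commit message above, having `stringToSlice` return a start/length handle with a format function instead of a raw slice, would remove this hazard at the API level, since no pointer into `string_bytes` would ever escape the pool.
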
From bc3b56f957d950edb6fde3585f8e9f4dda009d3e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 22:05:54 -0400 Subject: [PATCH 170/205] llvm: fix undefined pointer type --- src/codegen/llvm.zig | 247 +++++++++++++++++++++---------------------- 1 file changed, 120 insertions(+), 127 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index bd5052809512..723a55002703 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3789,10 +3789,7 @@ pub const DeclGen = struct { fn lowerIntAsPtr(dg: *DeclGen, val: Value) Error!*llvm.Value { switch (dg.module.intern_pool.indexToKey(val.toIntern())) { - .undef => { - const llvm_usize = try dg.lowerType(Type.usize); - return llvm_usize.getUndef(); - }, + .undef => return dg.context.pointerType(0).getUndef(), .int => { var bigint_space: Value.BigIntSpace = undefined; const bigint = val.toBigInt(&bigint_space, dg.module); @@ -3847,141 +3844,137 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); - return switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { - .int => dg.lowerIntAsPtr(ptr_val), - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), - .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), - .int => |int| dg.lowerIntAsPtr(int.toValue()), - .eu_payload => |eu_ptr| { - const parent_llvm_ptr = try dg.lowerParentPtr(eu_ptr.toValue(), true); - - const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); - const payload_ty = eu_ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - // In this case, we represent pointer to error union the same as pointer - // to the payload. - return parent_llvm_ptr; - } + return switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) { + .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), + .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), + .int => |int| dg.lowerIntAsPtr(int.toValue()), + .eu_payload => |eu_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(eu_ptr.toValue(), true); + + const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); + const payload_ty = eu_ty.errorUnionPayload(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + // In this case, we represent pointer to error union the same as pointer + // to the payload. 
+ return parent_llvm_ptr; + } - const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(payload_offset, .False), - }; - const eu_llvm_ty = try dg.lowerType(eu_ty); - return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .opt_payload => |opt_ptr| { - const parent_llvm_ptr = try dg.lowerParentPtr(opt_ptr.toValue(), true); + const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; + const llvm_u32 = dg.context.intType(32); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(payload_offset, .False), + }; + const eu_llvm_ty = try dg.lowerType(eu_ty); + return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .opt_payload => |opt_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(opt_ptr.toValue(), true); - const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); - const payload_ty = opt_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or - payload_ty.optionalReprIsPayload(mod)) - { - // In this case, we represent pointer to optional the same as pointer - // to the payload. - return parent_llvm_ptr; - } + const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); + const payload_ty = opt_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or + payload_ty.optionalReprIsPayload(mod)) + { + // In this case, we represent pointer to optional the same as pointer + // to the payload. + return parent_llvm_ptr; + } - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(0, .False), - }; - const opt_llvm_ty = try dg.lowerType(opt_ty); - return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .comptime_field => unreachable, - .elem => |elem_ptr| { - const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.base.toValue(), true); + const llvm_u32 = dg.context.intType(32); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(0, .False), + }; + const opt_llvm_ty = try dg.lowerType(opt_ty); + return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .comptime_field => unreachable, + .elem => |elem_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.base.toValue(), true); - const llvm_usize = try dg.lowerType(Type.usize); - const indices: [1]*llvm.Value = .{ - llvm_usize.constInt(elem_ptr.index, .False), - }; - const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); - const elem_llvm_ty = try dg.lowerType(elem_ty); - return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .field => |field_ptr| { - const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); - const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - - const field_index = @intCast(u32, field_ptr.index); - const llvm_u32 = dg.context.intType(32); - switch (parent_ty.zigTypeTag(mod)) { - .Union => { - if (parent_ty.containerLayout(mod) == .Packed) { - return parent_llvm_ptr; - } + const llvm_usize = try dg.lowerType(Type.usize); + const indices: [1]*llvm.Value = .{ + llvm_usize.constInt(elem_ptr.index, .False), + }; + const elem_ty = 
mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + const elem_llvm_ty = try dg.lowerType(elem_ty); + return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .field => |field_ptr| { + const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.base.toValue(), byte_aligned); + const parent_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + + const field_index = @intCast(u32, field_ptr.index); + const llvm_u32 = dg.context.intType(32); + switch (parent_ty.zigTypeTag(mod)) { + .Union => { + if (parent_ty.containerLayout(mod) == .Packed) { + return parent_llvm_ptr; + } - const layout = parent_ty.unionGetLayout(mod); - if (layout.payload_size == 0) { - // In this case a pointer to the union and a pointer to any - // (void) payload is the same. - return parent_llvm_ptr; - } - const llvm_pl_index = if (layout.tag_size == 0) - 0 - else - @boolToInt(layout.tag_align >= layout.payload_align); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_pl_index, .False), + const layout = parent_ty.unionGetLayout(mod); + if (layout.payload_size == 0) { + // In this case a pointer to the union and a pointer to any + // (void) payload is the same. + return parent_llvm_ptr; + } + const llvm_pl_index = if (layout.tag_size == 0) + 0 + else + @boolToInt(layout.tag_align >= layout.payload_align); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(llvm_pl_index, .False), + }; + const parent_llvm_ty = try dg.lowerType(parent_ty); + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + .Struct => { + if (parent_ty.containerLayout(mod) == .Packed) { + if (!byte_aligned) return parent_llvm_ptr; + const llvm_usize = dg.context.intType(target.ptrBitWidth()); + const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); + // count bits of fields before this one + const prev_bits = b: { + var b: usize = 0; + for (parent_ty.structFields(mod).values()[0..field_index]) |field| { + if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + b += @intCast(usize, field.ty.bitSize(mod)); + } + break :b b; }; - const parent_llvm_ty = try dg.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .Struct => { - if (parent_ty.containerLayout(mod) == .Packed) { - if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = dg.context.intType(target.ptrBitWidth()); - const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); - // count bits of fields before this one - const prev_bits = b: { - var b: usize = 0; - for (parent_ty.structFields(mod).values()[0..field_index]) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - b += @intCast(usize, field.ty.bitSize(mod)); - } - break :b b; - }; - const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); - const field_addr = base_addr.constAdd(byte_offset); - const final_llvm_ty = dg.context.pointerType(0); - return field_addr.constIntToPtr(final_llvm_ty); - } + const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); + const field_addr = base_addr.constAdd(byte_offset); + const final_llvm_ty = dg.context.pointerType(0); + return field_addr.constIntToPtr(final_llvm_ty); + } - const parent_llvm_ty = try dg.lowerType(parent_ty); - if (llvmField(parent_ty, field_index, mod)) |llvm_field| { - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field.index, 
.False), - }; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } else { - const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); - const indices: [1]*llvm.Value = .{llvm_index}; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } - }, - .Pointer => { - assert(parent_ty.isSlice(mod)); + const parent_llvm_ty = try dg.lowerType(parent_ty); + if (llvmField(parent_ty, field_index, mod)) |llvm_field| { const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), - llvm_u32.constInt(field_index, .False), + llvm_u32.constInt(llvm_field.index, .False), }; - const parent_llvm_ty = try dg.lowerType(parent_ty); return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - else => unreachable, - } - }, + } else { + const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); + const indices: [1]*llvm.Value = .{llvm_index}; + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + } + }, + .Pointer => { + assert(parent_ty.isSlice(mod)); + const indices: [2]*llvm.Value = .{ + llvm_u32.constInt(0, .False), + llvm_u32.constInt(field_index, .False), + }; + const parent_llvm_ty = try dg.lowerType(parent_ty); + return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); + }, + else => unreachable, + } }, - else => unreachable, }; } From fdfe730487972f089786938706f311b1f8631333 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 1 Jun 2023 22:20:26 -0400 Subject: [PATCH 171/205] InternPool: fix more key lifetime issues --- src/Sema.zig | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 3585c106eb55..794638ea4387 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7685,8 +7685,7 @@ fn instantiateGenericCall( // Make a runtime call to the new function, making sure to omit the comptime args. 
const comptime_args = callee.comptime_args.?; const func_ty = mod.declPtr(callee.owner_decl).ty; - const new_fn_info = mod.typeToFunc(func_ty).?; - const runtime_args_len = @intCast(u32, new_fn_info.param_types.len); + const runtime_args_len = @intCast(u32, mod.typeToFunc(func_ty).?.param_types.len); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { var runtime_i: u32 = 0; @@ -7702,7 +7701,7 @@ fn instantiateGenericCall( uncasted_args[total_i], comptime_args[total_i], runtime_args, - new_fn_info, + mod.typeToFunc(func_ty).?, &runtime_i, ) catch |err| switch (err) { error.NeededSourceLocation => { @@ -7713,7 +7712,7 @@ fn instantiateGenericCall( uncasted_args[total_i], comptime_args[total_i], runtime_args, - new_fn_info, + mod.typeToFunc(func_ty).?, &runtime_i, ); unreachable; @@ -7723,12 +7722,12 @@ fn instantiateGenericCall( total_i += 1; } - try sema.queueFullTypeResolution(new_fn_info.return_type.toType()); + try sema.queueFullTypeResolution(mod.typeToFunc(func_ty).?.return_type.toType()); } if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func != null and new_fn_info.return_type.toType().isError(mod)) { + if (sema.owner_func != null and mod.typeToFunc(func_ty).?.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -16346,10 +16345,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Optional value is only null if anyerror // Value can be zero-length slice otherwise const error_field_vals = if (ty.isAnyError(mod)) null else blk: { - const names = ty.errorSetNames(mod); - const vals = try sema.arena.alloc(InternPool.Index, names.len); - for (vals, names) |*field_val, name_ip| { - const name = ip.stringToSlice(name_ip); + const vals = try sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len); + for (vals, 0..) |*field_val, i| { + const name = ip.stringToSlice(ty.errorSetNames(mod)[i]); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); From 04e66e6b4deb67aef9a4064decd82a678cb7ec82 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 2 Jun 2023 01:15:36 -0400 Subject: [PATCH 172/205] InternPool: add optional coercion --- lib/std/child_process.zig | 6 +++--- lib/std/process.zig | 2 +- src/InternPool.zig | 15 ++++++++++++--- src/Sema.zig | 19 +++++++++++++++---- 4 files changed, 31 insertions(+), 11 deletions(-) diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index e051ea532ebb..db8524200260 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -542,7 +542,7 @@ pub const ChildProcess = struct { } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]?[*:0]const u8, os.environ.ptr); + break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); @@ -1425,9 +1425,9 @@ pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const EnvMap) ! 
return try allocator.realloc(result, i); } -pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]const u8 { +pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]u8 { const envp_count = env_map.count(); - const envp_buf = try arena.allocSentinel(?[*:0]const u8, envp_count, null); + const envp_buf = try arena.allocSentinel(?[*:0]u8, envp_count, null); { var it = env_map.iterator(); var i: usize = 0; diff --git a/lib/std/process.zig b/lib/std/process.zig index f5972eda1e91..6ad0df868ead 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1143,7 +1143,7 @@ pub fn execve( } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]?[*:0]const u8, os.environ.ptr); + break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process"); diff --git a/src/InternPool.zig b/src/InternPool.zig index ecdd30d1109c..801e351b4ef0 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4614,18 +4614,27 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .int => |int| return ip.getCoerced(gpa, int, new_ty), else => {}, }, - .opt => |opt| if (ip.isPointerType(new_ty)) - return switch (opt.val) { + .opt => |opt| switch (ip.indexToKey(new_ty)) { + .ptr_type => |ptr_type| return switch (opt.val) { .none => try ip.get(gpa, .{ .ptr = .{ .ty = new_ty, .addr = .{ .int = .zero_usize }, - .len = switch (ip.indexToKey(new_ty).ptr_type.flags.size) { + .len = switch (ptr_type.flags.size) { .One, .Many, .C => .none, .Slice => try ip.get(gpa, .{ .undef = .usize_type }), }, } }), else => |payload| try ip.getCoerced(gpa, payload, new_ty), }, + .opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = switch (opt.val) { + .none => .none, + else => try ip.getCoerced(gpa, opt.val, child_type), + }, + } }), + else => {}, + }, .err => |err| if (ip.isErrorSetType(new_ty)) return ip.get(gpa, .{ .err = .{ .ty = new_ty, diff --git a/src/Sema.zig b/src/Sema.zig index 794638ea4387..f8af883c4299 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -26470,7 +26470,11 @@ fn coerceExtra( } if (dest_info.sentinel == null or inst_info.sentinel == null or - !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, mod)) + !dest_info.sentinel.?.eql( + try mod.getCoerced(inst_info.sentinel.?, dest_info.pointee_type), + dest_info.pointee_type, + mod, + )) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -27350,7 +27354,11 @@ fn coerceInMemoryAllowed( } const ok_sent = dest_info.sentinel == null or (src_info.sentinel != null and - dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, mod)); + dest_info.sentinel.?.eql( + try mod.getCoerced(src_info.sentinel.?, dest_info.elem_type), + dest_info.elem_type, + mod, + )); if (!ok_sent) { return InMemoryCoercionResult{ .array_sentinel = .{ .actual = src_info.sentinel orelse Value.@"unreachable", @@ -27704,8 +27712,11 @@ fn coerceInMemoryAllowedPtrs( } const ok_sent = dest_info.sentinel == null or src_info.size == .C or - (src_info.sentinel != null and - dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type, sema.mod)); + (src_info.sentinel != null and 
dest_info.sentinel.?.eql( + try mod.getCoerced(src_info.sentinel.?, dest_info.pointee_type), + dest_info.pointee_type, + sema.mod, + )); if (!ok_sent) { return InMemoryCoercionResult{ .ptr_sentinel = .{ .actual = src_info.sentinel orelse Value.@"unreachable", From da24ea7f36d056cb49e8e91064f06cb724e46f67 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 2 Jun 2023 04:24:25 -0400 Subject: [PATCH 173/205] Sema: rewrite `monomorphed_funcs` usage In an effort to delete `Value.hashUncoerced`, generic instantiation has been redesigned. Instead of just storing instantiations in `monomorphed_funcs`, partially instantiated generic argument types are also cached. This isn't quite the single `getOrPut` that it used to be, but one `get` per generic argument plus one get for the instantiation, with an equal number of `put`s per unique instantiation isn't bad. --- src/Module.zig | 40 ++++++--- src/Sema.zig | 239 ++++++++++++++++++++----------------------------- src/value.zig | 71 --------------- 3 files changed, 126 insertions(+), 224 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 1b64f9f72eae..5f28f4f0691b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -99,6 +99,7 @@ tmp_hack_arena: std.heap.ArenaAllocator, /// This is currently only used for string literals. memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, +monomorphed_func_keys: std.ArrayListUnmanaged(InternPool.Index) = .{}, /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. @@ -202,24 +203,40 @@ pub const CImportError = struct { } }; -const MonomorphedFuncsSet = std.HashMapUnmanaged( - Fn.Index, - void, +pub const MonomorphedFuncKey = struct { func: Fn.Index, args_index: u32, args_len: u32 }; + +pub const MonomorphedFuncAdaptedKey = struct { func: Fn.Index, args: []const InternPool.Index }; + +pub const MonomorphedFuncsSet = std.HashMapUnmanaged( + MonomorphedFuncKey, + InternPool.Index, MonomorphedFuncsContext, std.hash_map.default_max_load_percentage, ); -const MonomorphedFuncsContext = struct { +pub const MonomorphedFuncsContext = struct { + mod: *Module, + + pub fn eql(_: @This(), a: MonomorphedFuncKey, b: MonomorphedFuncKey) bool { + return std.meta.eql(a, b); + } + + pub fn hash(ctx: @This(), key: MonomorphedFuncKey) u64 { + const key_args = ctx.mod.monomorphed_func_keys.items[key.args_index..][0..key.args_len]; + return std.hash.Wyhash.hash(@enumToInt(key.func), std.mem.sliceAsBytes(key_args)); + } +}; + +pub const MonomorphedFuncsAdaptedContext = struct { mod: *Module, - pub fn eql(ctx: @This(), a: Fn.Index, b: Fn.Index) bool { - _ = ctx; - return a == b; + pub fn eql(ctx: @This(), adapted_key: MonomorphedFuncAdaptedKey, other_key: MonomorphedFuncKey) bool { + const other_key_args = ctx.mod.monomorphed_func_keys.items[other_key.args_index..][0..other_key.args_len]; + return adapted_key.func == other_key.func and std.mem.eql(InternPool.Index, adapted_key.args, other_key_args); } - /// Must match `Sema.GenericCallAdapter.hash`. 
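Stepping back to the optional-coercion commit above: the new `.opt_type` case mirrors the language-level coercion rules for optionals, where `null` maps to `null` and a present payload is recursively coerced to the new child type. A small userspace sketch of those semantics, ordinary test code rather than compiler internals:

    const std = @import("std");

    test "optional coercion semantics" {
        // A payload coerces into the optional, and ?u8 -> ?u16 coerces the payload.
        const a: ?u8 = 7;
        const b: ?u16 = a;
        try std.testing.expectEqual(@as(?u16, 7), b);

        // null stays null in the target optional type.
        const c: ?u8 = null;
        const d: ?u16 = c;
        try std.testing.expect(d == null);
    }

Optional pointers are the special case handled first in that patch: their `null` is represented as address zero, which is why the `.ptr_type` arm materializes a zero-valued `usize` rather than a `.none` payload.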
- pub fn hash(ctx: @This(), key: Fn.Index) u64 { - return ctx.mod.funcPtr(key).hash; + pub fn hash(_: @This(), adapted_key: MonomorphedFuncAdaptedKey) u64 { + return std.hash.Wyhash.hash(@enumToInt(adapted_key.func), std.mem.sliceAsBytes(adapted_key.args)); } }; @@ -571,9 +588,6 @@ pub const Decl = struct { pub fn clearValues(decl: *Decl, mod: *Module) void { if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); - if (mod.funcPtr(func).comptime_args != null) { - _ = mod.monomorphed_funcs.removeContext(func, .{ .mod = mod }); - } mod.destroyFunc(func); } } diff --git a/src/Sema.zig b/src/Sema.zig index f8af883c4299..0ffd79bec31a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6679,78 +6679,6 @@ fn callBuiltin( _ = try sema.analyzeCall(block, builtin_fn, func_ty, sema.src, sema.src, modifier, false, args, null, null); } -const GenericCallAdapter = struct { - generic_fn: *Module.Fn, - precomputed_hash: u64, - func_ty_info: InternPool.Key.FuncType, - args: []const Arg, - module: *Module, - - const Arg = struct { - ty: Type, - val: Value, - is_anytype: bool, - }; - - pub fn eql(ctx: @This(), adapted_key: void, other_key: Module.Fn.Index) bool { - _ = adapted_key; - const other_func = ctx.module.funcPtr(other_key); - - // Checking for equality may happen on an item that has been inserted - // into the map but is not yet fully initialized. In such case, the - // two initialized fields are `hash` and `generic_owner_decl`. - if (ctx.generic_fn.owner_decl != other_func.generic_owner_decl.unwrap().?) return false; - - const other_comptime_args = other_func.comptime_args.?; - for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| { - const this_arg = ctx.args[i]; - const this_is_comptime = !this_arg.val.isGenericPoison(); - const other_is_comptime = !other_arg.val.isGenericPoison(); - const this_is_anytype = this_arg.is_anytype; - const other_is_anytype = other_func.isAnytypeParam(ctx.module, @intCast(u32, i)); - - if (other_is_anytype != this_is_anytype) return false; - if (other_is_comptime != this_is_comptime) return false; - - if (this_is_anytype) { - // Both are anytype parameters. - if (!this_arg.ty.eql(other_arg.ty, ctx.module)) { - return false; - } - if (this_is_comptime) { - // Both are comptime and anytype parameters with matching types. - if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.module)) { - return false; - } - } - } else if (this_is_comptime) { - // Both are comptime parameters but not anytype parameters. - // We assert no error is possible here because any lazy values must be resolved - // before inserting into the generic function hash map. - const is_eql = Value.eqlAdvanced( - this_arg.val, - this_arg.ty, - other_arg.val, - other_arg.ty, - ctx.module, - null, - ) catch unreachable; - if (!is_eql) { - return false; - } - } - } - return true; - } - - /// The implementation of the hash is in semantic analysis of function calls, so - /// that any errors when computing the hash can be properly reported. 
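For readers unfamiliar with the adapted-context machinery introduced in Module.zig above: the map's stored keys are small fixed-size (func, offset, len) triples into one shared key buffer, while lookups can be made directly against a (func, slice) pair, so no triple has to be materialized just to probe. A stripped-down sketch of the scheme, with `u32` standing in for `Fn.Index` and `InternPool.Index` (the names and simplifications here are mine, not the compiler's):

    const std = @import("std");

    const Key = struct { func: u32, args_index: u32, args_len: u32 };
    const AdaptedKey = struct { func: u32, args: []const u32 };

    const Ctx = struct {
        keys: []const u32, // the shared buffer (monomorphed_func_keys.items)

        pub fn hash(ctx: Ctx, k: Key) u64 {
            const args = ctx.keys[k.args_index..][0..k.args_len];
            return std.hash.Wyhash.hash(k.func, std.mem.sliceAsBytes(args));
        }
        pub fn eql(_: Ctx, a: Key, b: Key) bool {
            return std.meta.eql(a, b);
        }
    };

    const AdaptedCtx = struct {
        keys: []const u32,

        // Must produce the same hash as Ctx.hash for a matching key.
        pub fn hash(_: AdaptedCtx, ak: AdaptedKey) u64 {
            return std.hash.Wyhash.hash(ak.func, std.mem.sliceAsBytes(ak.args));
        }
        pub fn eql(ctx: AdaptedCtx, ak: AdaptedKey, k: Key) bool {
            const args = ctx.keys[k.args_index..][0..k.args_len];
            return ak.func == k.func and std.mem.eql(u32, ak.args, args);
        }
    };

    const Map = std.HashMapUnmanaged(Key, u32, Ctx, std.hash_map.default_max_load_percentage);

    test "lookup by slice without materializing a stored key" {
        const gpa = std.testing.allocator;
        var shared: std.ArrayListUnmanaged(u32) = .{};
        defer shared.deinit(gpa);
        try shared.appendSlice(gpa, &.{ 10, 20 });

        var map: Map = .{};
        defer map.deinit(gpa);
        try map.putContext(gpa, .{ .func = 1, .args_index = 0, .args_len = 2 }, 99, .{ .keys = shared.items });

        const got = map.getAdapted(
            AdaptedKey{ .func = 1, .args = &.{ 10, 20 } },
            AdaptedCtx{ .keys = shared.items },
        );
        try std.testing.expectEqual(@as(?u32, 99), got);
    }

The requirement that the two `hash` implementations agree is the same invariant the deleted comments here used to track manually ("Must match `Sema.GenericCallAdapter.hash`"), now enforced by keeping both functions trivially byte-identical over the same data.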
- pub fn hash(ctx: @This(), adapted_key: void) u64 { - _ = adapted_key; - return ctx.precomputed_hash; - } -}; - fn analyzeCall( sema: *Sema, block: *Block, @@ -7480,11 +7408,12 @@ fn instantiateGenericCall( const ip = &mod.intern_pool; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = mod.funcPtr(switch (ip.indexToKey(func_val.toIntern())) { + const module_fn_index = switch (ip.indexToKey(func_val.toIntern())) { .func => |function| function.index, .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?, else => unreachable, - }); + }; + const module_fn = mod.funcPtr(module_fn_index); // Check the Module's generic function map with an adapted context, so that we // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. @@ -7495,32 +7424,24 @@ fn instantiateGenericCall( const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); const zir_tags = fn_zir.instructions.items(.tag); - // This hash must match `Module.MonomorphedFuncsContext.hash`. - // For parameters explicitly marked comptime and simple parameter type expressions, - // we know whether a parameter is elided from a monomorphed function, and can - // use it in the hash here. However, for parameter type expressions that are not - // explicitly marked comptime and rely on previous parameter comptime values, we - // don't find out until after generating a monomorphed function whether the parameter - // type ended up being a "must-be-comptime-known" type. - var hasher = std.hash.Wyhash.init(0); - std.hash.autoHash(&hasher, module_fn.owner_decl); - - const generic_args = try sema.arena.alloc(GenericCallAdapter.Arg, func_ty_info.param_types.len); - { - var i: usize = 0; + const generic_args = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); + const callee_index = callee: { + var arg_i: usize = 0; + var generic_arg_i: u32 = 0; + var known_unique = false; for (fn_info.param_body) |inst| { var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i)); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i)); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7529,7 +7450,15 @@ fn instantiateGenericCall( else => continue, } - const arg_ty = sema.typeOf(uncasted_args[i]); + defer arg_i += 1; + if (known_unique) { + if (is_comptime or is_anytype) { + generic_arg_i += 1; + } + continue; + } + + const arg_ty = sema.typeOf(uncasted_args[arg_i]); if (is_comptime or is_anytype) { // Tuple default values are a part of the type and need to be // resolved to hash the type. 
@@ -7537,69 +7466,72 @@ fn instantiateGenericCall( } if (is_comptime) { - const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[i]) catch |err| switch (err) { + const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[arg_i]) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const arg_src = mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src); - _ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[i]); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); + _ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[arg_i]); unreachable; }, else => |e| return e, }; - arg_val.hashUncoerced(arg_ty, &hasher, mod); + if (is_anytype) { - std.hash.autoHash(&hasher, arg_ty.toIntern()); - generic_args[i] = .{ - .ty = arg_ty, - .val = arg_val, - .is_anytype = true, - }; + generic_args[generic_arg_i] = arg_val.toIntern(); } else { - generic_args[i] = .{ - .ty = arg_ty, - .val = arg_val, - .is_anytype = false, + const final_arg_ty = mod.monomorphed_funcs.getAdapted( + Module.MonomorphedFuncAdaptedKey{ + .func = module_fn_index, + .args = generic_args[0..generic_arg_i], + }, + Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, + ) orelse { + known_unique = true; + generic_arg_i += 1; + continue; + }; + const casted_arg = sema.coerce(block, final_arg_ty.toType(), uncasted_args[arg_i], .unneeded) catch |err| switch (err) { + error.NeededSourceLocation => { + const decl = sema.mod.declPtr(block.src_decl); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); + _ = try sema.coerce(block, final_arg_ty.toType(), uncasted_args[arg_i], arg_src); + unreachable; + }, + else => |e| return e, }; + const casted_arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, casted_arg) catch |err| switch (err) { + error.NeededSourceLocation => { + const decl = sema.mod.declPtr(block.src_decl); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); + _ = try sema.analyzeGenericCallArgVal(block, arg_src, casted_arg); + unreachable; + }, + else => |e| return e, + }; + generic_args[generic_arg_i] = casted_arg_val.toIntern(); } + generic_arg_i += 1; } else if (is_anytype) { - std.hash.autoHash(&hasher, arg_ty.toIntern()); - generic_args[i] = .{ - .ty = arg_ty, - .val = Value.generic_poison, - .is_anytype = true, - }; - } else { - generic_args[i] = .{ - .ty = arg_ty, - .val = Value.generic_poison, - .is_anytype = false, - }; + generic_args[generic_arg_i] = arg_ty.toIntern(); + generic_arg_i += 1; } - - i += 1; } - } - const precomputed_hash = hasher.final(); + if (!known_unique) { + if (mod.monomorphed_funcs.getAdapted( + Module.MonomorphedFuncAdaptedKey{ + .func = module_fn_index, + .args = generic_args[0..generic_arg_i], + }, + Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, + )) |callee_func| break :callee mod.intern_pool.indexToKey(callee_func).func.index; + } - const adapter: GenericCallAdapter = .{ - .generic_fn = module_fn, - .precomputed_hash = precomputed_hash, - .func_ty_info = func_ty_info, - .args = generic_args, - .module = mod, - }; - const gop = try mod.monomorphed_funcs.getOrPutContextAdapted(gpa, {}, adapter, .{ .mod = mod }); - const callee_index = if (!gop.found_existing) callee: { const new_module_func_index = try mod.createFunc(undefined); const new_module_func = mod.funcPtr(new_module_func_index); - // This ensures that we can operate on the hash map before the Module.Fn - // struct is 
fully initialized. - new_module_func.hash = precomputed_hash; new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional(); new_module_func.comptime_args = null; - gop.key_ptr.* = new_module_func_index; try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); @@ -7641,7 +7573,8 @@ fn instantiateGenericCall( new_decl, new_decl_index, uncasted_args, - module_fn, + generic_arg_i, + module_fn_index, new_module_func_index, namespace_index, func_ty_info, @@ -7657,12 +7590,10 @@ fn instantiateGenericCall( } assert(namespace.anon_decls.orderedRemove(new_decl_index)); mod.destroyDecl(new_decl_index); - assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); mod.destroyFunc(new_module_func_index); return err; }, else => { - assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); // TODO look up the compile error that happened here and attach a note to it // pointing here, at the generic instantiation callsite. if (sema.owner_func) |owner_func| { @@ -7675,9 +7606,8 @@ fn instantiateGenericCall( }; break :callee new_func; - } else gop.key_ptr.*; + }; const callee = mod.funcPtr(callee_index); - callee.branch_quota = @max(callee.branch_quota, sema.branch_quota); const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl); @@ -7752,7 +7682,7 @@ fn instantiateGenericCall( if (call_tag == .call_always_tail) { return sema.handleTailCall(block, call_src, func_ty, result); } - if (new_fn_info.return_type == .noreturn_type) { + if (func_ty.fnReturnType(mod).isNoReturn(mod)) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7766,7 +7696,8 @@ fn resolveGenericInstantiationType( new_decl: *Decl, new_decl_index: Decl.Index, uncasted_args: []const Air.Inst.Ref, - module_fn: *Module.Fn, + generic_args_len: u32, + module_fn_index: Module.Fn.Index, new_module_func: Module.Fn.Index, namespace: Namespace.Index, func_ty_info: InternPool.Key.FuncType, @@ -7777,6 +7708,7 @@ fn resolveGenericInstantiationType( const gpa = sema.gpa; const zir_tags = fn_zir.instructions.items(.tag); + const module_fn = mod.funcPtr(module_fn_index); const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); // Re-run the block that creates the function, with the comptime parameters @@ -7893,9 +7825,15 @@ fn resolveGenericInstantiationType( const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; assert(new_func == new_module_func); + const generic_args_index = @intCast(u32, mod.monomorphed_func_keys.items.len); + const generic_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, generic_args_len); + var generic_arg_i: u32 = 0; + try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, generic_args_len + 1, .{ .mod = mod }); + arg_i = 0; for (fn_info.param_body) |inst| { var is_comptime = false; + var is_anytype = false; switch (zir_tags[inst]) { .param => { is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); @@ -7904,9 +7842,11 @@ fn resolveGenericInstantiationType( is_comptime = true; }, .param_anytype => { + is_anytype = true; is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { + is_anytype = true; is_comptime = true; }, else => continue, @@ -7924,11 +7864,24 @@ fn resolveGenericInstantiationType( if (is_comptime) { const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?; + if (!is_anytype) { + if (mod.monomorphed_funcs.fetchPutAssumeCapacityContext(.{ + .func = module_fn_index, + .args_index = generic_args_index, + .args_len 
= generic_arg_i, + }, arg_ty.toIntern(), .{ .mod = mod })) |kv| assert(kv.value == arg_ty.toIntern()); + } + generic_args[generic_arg_i] = arg_val.toIntern(); + generic_arg_i += 1; child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = (try arg_val.intern(arg_ty, mod)).toValue(), }; } else { + if (is_anytype) { + generic_args[generic_arg_i] = arg_ty.toIntern(); + generic_arg_i += 1; + } child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = Value.generic_poison, @@ -7963,6 +7916,12 @@ fn resolveGenericInstantiationType( new_decl.owns_tv = true; new_decl.analysis = .complete; + mod.monomorphed_funcs.putAssumeCapacityNoClobberContext(.{ + .func = module_fn_index, + .args_index = generic_args_index, + .args_len = generic_arg_i, + }, new_decl.val.toIntern(), .{ .mod = mod }); + // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // will be populated, ensuring it will have `analyzeBody` called with the ZIR // parameters mapped appropriately. diff --git a/src/value.zig b/src/value.zig index 2c38852bf575..39586152144b 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1691,77 +1691,6 @@ pub const Value = struct { return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq); } - /// This is a more conservative hash function that produces equal hashes for values - /// that can coerce into each other. - /// This function is used by hash maps and so treats floating-point NaNs as equal - /// to each other, and not equal to other floating-point values. - pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - if (val.isUndef(mod)) return; - // The value is runtime-known and shouldn't affect the hash. - if (val.isRuntimeValue(mod)) return; - - if (val.ip_index != .none) { - // The InternPool data structure hashes based on Key to make interned objects - // unique. An Index can be treated simply as u32 value for the - // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, val.toIntern()); - return; - } - - switch (ty.zigTypeTag(mod)) { - .Opaque => unreachable, // Cannot hash opaque types - .Void, - .NoReturn, - .Undefined, - .Null, - .Struct, // It sure would be nice to do something clever with structs. - => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag), - .Pointer => { - assert(ty.isSlice(mod)); - const slice = val.castTag(.slice).?.data; - const ptr_ty = ty.slicePtrFieldType(mod); - slice.ptr.hashUncoerced(ptr_ty, hasher, mod); - }, - .Type, - .Float, - .ComptimeFloat, - .Bool, - .Int, - .ComptimeInt, - .Fn, - .Optional, - .ErrorSet, - .ErrorUnion, - .Enum, - .EnumLiteral, - => unreachable, // handled above with the ip_index check - .Array, .Vector => { - const len = ty.arrayLen(mod); - const elem_ty = ty.childType(mod); - var index: usize = 0; - while (index < len) : (index += 1) { - const elem_val = val.elemValue(mod, index) catch |err| switch (err) { - // Will be solved when arrays and vectors get migrated to the intern pool. 
- error.OutOfMemory => @panic("OOM"), - }; - elem_val.hashUncoerced(elem_ty, hasher, mod); - } - }, - .Union => { - hasher.update(val.tagName(mod)); - switch (mod.intern_pool.indexToKey(val.toIntern())) { - .un => |un| { - const active_field_ty = ty.unionFieldType(un.tag.toValue(), mod); - un.val.toValue().hashUncoerced(active_field_ty, hasher, mod); - }, - else => std.hash.autoHash(hasher, std.builtin.TypeId.Void), - } - }, - .Frame => @panic("TODO implement hashing frame values"), - .AnyFrame => @panic("TODO implement hashing anyframe values"), - } - } - pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { From 7a59cd286345470412b4880ca576553c7d5827eb Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 2 Jun 2023 05:07:07 -0400 Subject: [PATCH 174/205] Sema: hack around UAF --- src/Sema.zig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 0ffd79bec31a..8c657b3f52ec 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -26792,11 +26792,12 @@ fn coerceValueInMemory( else => unreachable, }; if (src_ty_child != dst_ty_child) break :direct; + // TODO: write something like getCoercedInts to avoid needing to dupe return (try mod.intern(.{ .aggregate = .{ .ty = dst_ty.toIntern(), .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[0..dest_len] }, - .elems => |elems| .{ .elems = elems[0..dest_len] }, + .bytes => |bytes| .{ .bytes = try sema.arena.dupe(u8, bytes[0..dest_len]) }, + .elems => |elems| .{ .elems = try sema.arena.dupe(InternPool.Index, elems[0..dest_len]) }, .repeated_elem => |elem| .{ .repeated_elem = elem }, }, } })).toValue(); From 0f80652efb170aa4158e378dbb493da717c9bd17 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 2 Jun 2023 09:53:29 +0100 Subject: [PATCH 175/205] Sema: remove leftover references to value_arena Notably, there was a bug where the fields of reified structs and unions were allocated into an arena which was leaked. These are now in the Module.tmp_hack_arena. --- src/Sema.zig | 52 +++++++++++----------------------------------------- 1 file changed, 11 insertions(+), 41 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 8c657b3f52ec..81befbf49ebd 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -702,24 +702,15 @@ pub const Block = struct { pub fn startAnonDecl(block: *Block) !WipAnonDecl { return WipAnonDecl{ .block = block, - .new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa), .finished = false, }; } pub const WipAnonDecl = struct { block: *Block, - new_decl_arena: std.heap.ArenaAllocator, finished: bool, - pub fn arena(wad: *WipAnonDecl) Allocator { - return wad.new_decl_arena.allocator(); - } - pub fn deinit(wad: *WipAnonDecl) void { - if (!wad.finished) { - wad.new_decl_arena.deinit(); - } wad.* = undefined; } @@ -2774,9 +2765,6 @@ fn zirStructDecl( break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type gains an // InternPool index. 
@@ -3230,9 +3218,6 @@ fn zirUnionDecl( break :blk decls_len; } else 0; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - // Because these three things each reference each other, `undefined` // placeholders are used before being set after the union type gains an // InternPool index. @@ -3297,7 +3282,6 @@ fn zirOpaqueDecl( defer tracy.end(); const mod = sema.mod; - const gpa = sema.gpa; const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); var extra_index: usize = extended.operand; @@ -3313,9 +3297,6 @@ fn zirOpaqueDecl( break :blk decls_len; } else 0; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - // Because these three things each reference each other, `undefined` // placeholders are used in two places before being set after the opaque // type gains an InternPool index. @@ -3691,7 +3672,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( elem_ty, - try store_val.copy(anon_decl.arena()), + store_val, ptr_info.@"align", )); } @@ -3937,7 +3918,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com defer anon_decl.deinit(); const new_decl_index = try anon_decl.finish( final_elem_ty, - try store_val.copy(anon_decl.arena()), + store_val, ia1.alignment.toByteUnits(0), ); break :d new_decl_index; @@ -5168,7 +5149,7 @@ fn storeToInferredAllocComptime( defer anon_decl.deinit(); iac.decl_index = try anon_decl.finish( operand_ty, - try operand_val.copy(anon_decl.arena()), + operand_val, iac.alignment.toByteUnits(0), ); return; @@ -5860,7 +5841,7 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer anon_decl.deinit(); break :blk try anon_decl.finish( operand.ty, - try operand.val.copy(anon_decl.arena()), + operand.val, 0, ); }; @@ -15895,7 +15876,7 @@ fn zirBuiltinSrc( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); // The compiler must not call realpath anywhere. - const name = try fn_owner_decl.getFileScope(mod).fullPathZ(anon_decl.arena()); + const name = try fn_owner_decl.getFileScope(mod).fullPathZ(sema.arena); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -18792,7 +18773,7 @@ fn addConstantMaybeRef( defer anon_decl.deinit(); const decl = try anon_decl.finish( ty, - try val.copy(anon_decl.arena()), + val, 0, // default alignment ); return sema.analyzeDeclRef(decl); @@ -19515,9 +19496,6 @@ fn zirReify( return sema.fail(block, src, "reified opaque must have no decls", .{}); } - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - // Because these three things each reference each other, // `undefined` placeholders are used in two places before being set // after the opaque type gains an InternPool index. @@ -19572,10 +19550,6 @@ fn zirReify( } const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - // Because these three things each reference each other, `undefined` // placeholders are used before being set after the union type gains an // InternPool index. 
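The leak mentioned in this commit's message came from field allocations tied to a per-decl arena whose lifetime was unclear. As a generic illustration of the shape such a leak can take, hypothetical code rather than the exact compiler path:

    const std = @import("std");

    // Memory allocated from a function-local arena escapes, but the arena
    // itself is only torn down on the error path, so the success path leaks.
    fn reifiedFields(gpa: std.mem.Allocator, n: usize) ![]u32 {
        var arena = std.heap.ArenaAllocator.init(gpa);
        errdefer arena.deinit(); // never runs on success
        return try arena.allocator().alloc(u32, n);
    }

Moving such allocations into the module-owned `tmp_hack_arena`, as this commit does, gives them a single explicit lifetime instead.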
@@ -19645,7 +19619,7 @@ fn zirReify( } // Fields - try union_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); + try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); for (0..fields_len) |i| { const elem_val = try fields_val.elemValue(mod, i); @@ -19873,10 +19847,6 @@ fn reifyStruct( const gpa = sema.gpa; const ip = &mod.intern_pool; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type gains an // InternPool index. @@ -19921,7 +19891,7 @@ fn reifyStruct( // Fields const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); - try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); + try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); var i: usize = 0; while (i < fields_len) : (i += 1) { const elem_val = try fields_val.elemValue(mod, i); @@ -20209,7 +20179,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); + const bytes = try ty.nameAllocArena(sema.arena, mod); const decl_ty = try mod.arrayType(.{ .len = bytes.len, @@ -29740,7 +29710,7 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { defer anon_decl.deinit(); const decl = try anon_decl.finish( ty, - try val.copy(anon_decl.arena()), + val, 0, // default alignment ); try sema.maybeQueueFuncBodyAnalysis(decl); @@ -29824,7 +29794,7 @@ fn analyzeRef( defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( operand_ty, - try val.copy(anon_decl.arena()), + val, 0, // default alignment )); } From 0fd52cdc5eb4b17e8066a06d8af761f934cf8808 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 2 Jun 2023 13:47:38 +0100 Subject: [PATCH 176/205] InternPool: avoid aggregate null bytes storage This is a workaround for InternPool currently not handling non-null-terminated strings. It avoids using the `bytes` storage for aggregates if there are any null bytes. In the future this should be changed so that the `bytes` storage can be used regardless of whether there are any null bytes. This is important for use cases such as `@embedFile`. However, this fixes a bug for now, and after this commit, stage2 self-hosts again. mlugg: stage5 passes all enabled behavior tests on my system. Commit message edited by Andrew Kelley --- src/InternPool.zig | 23 +++++++++++++++++++++-- src/Module.zig | 1 + 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 801e351b4ef0..2e592c4dd7df 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3951,6 +3951,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { else => unreachable, }, } + // We can't dedup '0' bytes in the pool or it could add garbage to string_bytes. So + // if there are any 0 bytes, we have to skip the bytes case. Note that it's okay for + // our sentinel to be 0 since getOrPutTrailingString would add a 0 sentinel anyway. 
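The loop that follows implements the skip described in the comment above. The invariant itself is easy to state in isolation; a tiny standalone sketch, assuming (as the comment implies) that `string_bytes` acts as a null-terminated string table:

    const std = @import("std");

    // A null-terminated string table can only deduplicate byte strings that
    // contain no interior 0 bytes; an embedded 0 would break the terminator.
    fn canDedupAsString(bytes: []const u8) bool {
        return std.mem.indexOfScalar(u8, bytes, 0) == null;
    }

    test "interior null bytes block string-table dedup" {
        try std.testing.expect(canDedupAsString("hello"));
        try std.testing.expect(!canDedupAsString("he\x00llo"));
    }

The very next commit refines this: bytes with interior nulls can still live in `string_bytes`, they just cannot be deduplicated.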
+ for (ip.string_bytes.items[string_bytes_index..]) |x| { + if (x == 0) { + ip.string_bytes.shrinkRetainingCapacity(string_bytes_index); + break :bytes; + } + } if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( @intCast(u8, ip.indexToKey(sentinel).int.storage.u64), ); @@ -3975,7 +3984,17 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .ty = aggregate.ty, }), }); - ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems)); + switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |b| { + const elem = try ip.get(gpa, .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = b }, + } }); + ip.extra.appendAssumeCapacity(@enumToInt(elem)); + }, + .elems => |elems| ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, elems)), + .repeated_elem => |elem| ip.extra.appendNTimesAssumeCapacity(@enumToInt(elem), len), + } if (sentinel != .none) ip.extra.appendAssumeCapacity(@enumToInt(sentinel)); }, @@ -5203,7 +5222,7 @@ pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void ip.funcPtr(index).* = undefined; ip.funcs_free_list.append(gpa, index) catch { // In order to keep `destroyFunc` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the Union until garbage collection. + // allocation failures here, instead leaking the Fn until garbage collection. }; } diff --git a/src/Module.zig b/src/Module.zig index 5f28f4f0691b..9d58029cb519 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -337,6 +337,7 @@ pub const CaptureScope = struct { if (!self.failed()) { self.captures.deinit(gpa); } + gpa.destroy(self); } }; From ad54f47b95a2295e0c199decb5ff10c572317a22 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 2 Jun 2023 14:02:48 -0400 Subject: [PATCH 177/205] InternPool: optimize previous fix Just because we can't dedup, doesn't mean we can't use `string_bytes`. --- src/InternPool.zig | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 2e592c4dd7df..bd7028b87947 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3951,24 +3951,20 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { else => unreachable, }, } - // We can't dedup '0' bytes in the pool or it could add garbage to string_bytes. So - // if there are any 0 bytes, we have to skip the bytes case. Note that it's okay for - // our sentinel to be 0 since getOrPutTrailingString would add a 0 sentinel anyway. 
- for (ip.string_bytes.items[string_bytes_index..]) |x| { - if (x == 0) { - ip.string_bytes.shrinkRetainingCapacity(string_bytes_index); - break :bytes; - } - } + const has_internal_null = + std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null; if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( @intCast(u8, ip.indexToKey(sentinel).int.storage.u64), ); - const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel); + const string = if (has_internal_null) + @intToEnum(String, string_bytes_index) + else + (try ip.getOrPutTrailingString(gpa, len_including_sentinel)).toString(); ip.items.appendAssumeCapacity(.{ .tag = .bytes, .data = ip.addExtraAssumeCapacity(Bytes{ .ty = aggregate.ty, - .bytes = bytes.toString(), + .bytes = string, }), }); return @intToEnum(Index, ip.items.len - 1); @@ -3984,17 +3980,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .ty = aggregate.ty, }), }); - switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |b| { - const elem = try ip.get(gpa, .{ .int = .{ - .ty = .u8_type, - .storage = .{ .u64 = b }, - } }); - ip.extra.appendAssumeCapacity(@enumToInt(elem)); - }, - .elems => |elems| ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, elems)), - .repeated_elem => |elem| ip.extra.appendNTimesAssumeCapacity(@enumToInt(elem), len), - } + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems)); if (sentinel != .none) ip.extra.appendAssumeCapacity(@enumToInt(sentinel)); }, From 6a15fc87ad62ec0509017c960f6983ce1493c31d Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 2 Jun 2023 14:42:05 -0400 Subject: [PATCH 178/205] Sema: handle generic types when coercing functions in memory This used to be handled by `Type.eql`, but that is now a single comparison. --- src/Sema.zig | 48 ++++++++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 81befbf49ebd..ca4e761cdcdc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -27498,17 +27498,20 @@ fn coerceInMemoryAllowedFns( } }; } - if (src_info.return_type != .noreturn_type) { - const dest_return_type = dest_info.return_type.toType(); - const src_return_type = src_info.return_type.toType(); - const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src); - if (rt != .ok) { - return InMemoryCoercionResult{ .fn_return_type = .{ - .child = try rt.dupe(sema.arena), - .actual = dest_return_type, - .wanted = src_return_type, - } }; - } + switch (src_info.return_type) { + .noreturn_type, .generic_poison_type => {}, + else => { + const dest_return_type = dest_info.return_type.toType(); + const src_return_type = src_info.return_type.toType(); + const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src); + if (rt != .ok) { + return InMemoryCoercionResult{ .fn_return_type = .{ + .child = try rt.dupe(sema.arena), + .actual = dest_return_type, + .wanted = src_return_type, + } }; + } + }, } } @@ -27548,15 +27551,20 @@ fn coerceInMemoryAllowedFns( } }; } - // Note: Cast direction is reversed here. 
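The "single comparison" in this commit's message is the payoff of interning: once every type is an index into the pool, structural equality collapses to integer equality, so generic-poison placeholders must now be special-cased explicitly rather than falling out of a structural walk. Roughly, with a hypothetical stand-in for the index type:

    // Hypothetical stand-in for InternPool.Index: an opaque 32-bit handle.
    const Index = enum(u32) { _ };

    // What used to be a deep structural walk in Type.eql.
    fn typesEql(a: Index, b: Index) bool {
        return a == b;
    }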
- const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src); - if (param != .ok) { - return InMemoryCoercionResult{ .fn_param = .{ - .child = try param.dupe(sema.arena), - .actual = src_param_ty, - .wanted = dest_param_ty, - .index = param_i, - } }; + switch (src_param_ty.toIntern()) { + .generic_poison_type => {}, + else => { + // Note: Cast direction is reversed here. + const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src); + if (param != .ok) { + return InMemoryCoercionResult{ .fn_param = .{ + .child = try param.dupe(sema.arena), + .actual = src_param_ty, + .wanted = dest_param_ty, + .index = param_i, + } }; + } + }, } } From e23b0a01e6357252eb2c08a83eff9169ce49042c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 2 Jun 2023 18:49:40 -0400 Subject: [PATCH 179/205] InternPool: fix yet more key lifetime issues --- src/Module.zig | 15 +++++---- src/Sema.zig | 61 +++++++++++++++++++++---------------- src/arch/x86_64/CodeGen.zig | 8 ++--- src/codegen.zig | 2 +- src/type.zig | 5 ++- src/value.zig | 5 +-- 6 files changed, 52 insertions(+), 44 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 9d58029cb519..c1d6b8157a4e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5448,7 +5448,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE defer comptime_mutable_decls.deinit(); const fn_ty = decl.ty; - const fn_ty_info = mod.typeToFunc(fn_ty).?; var sema: Sema = .{ .mod = mod, @@ -5459,7 +5458,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE .owner_decl_index = decl_index, .func = func, .func_index = func_index.toOptional(), - .fn_ret_ty = fn_ty_info.return_type.toType(), + .fn_ret_ty = mod.typeToFunc(fn_ty).?.return_type.toType(), .owner_func = func, .owner_func_index = func_index.toOptional(), .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), @@ -5499,7 +5498,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. - const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len); + const runtime_params_len = @intCast(u32, mod.typeToFunc(fn_ty).?.param_types.len); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); @@ -5525,7 +5524,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE sema.inst_map.putAssumeCapacityNoClobber(inst, arg); total_param_index += 1; continue; - } else fn_ty_info.param_types[runtime_param_index].toType(); + } else mod.typeToFunc(fn_ty).?.param_types[runtime_param_index].toType(); const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, @@ -5623,7 +5622,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE // Crucially, this happens *after* we set the function state to success above, // so that dependencies on the function body will now be satisfied rather than // result in circular dependency errors. - sema.resolveFnTypes(mod.typeToFunc(fn_ty).?) 
catch |err| switch (err) { + sema.resolveFnTypes(fn_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -6378,9 +6377,9 @@ pub fn populateTestFunctions( for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - // Protects test_decl_name from being invalidated during call to intern() below. - try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(test_decl.name).len + 10); - const test_decl_name = ip.stringToSlice(test_decl.name); + // TODO: write something like getCoercedInts to avoid needing to dupe + const test_decl_name = try gpa.dupe(u8, ip.stringToSlice(test_decl.name)); + defer gpa.free(test_decl_name); const test_name_decl_index = n: { const test_name_decl_ty = try mod.arrayType(.{ .len = test_decl_name.len, diff --git a/src/Sema.zig b/src/Sema.zig index ca4e761cdcdc..4d9fc201a11e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5227,6 +5227,8 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_bytes = try sema.arena.dupe(u8, bytes); const ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type, @@ -5234,7 +5236,7 @@ fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Ins }); const val = try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), - .storage = .{ .bytes = bytes }, + .storage = .{ .bytes = duped_bytes }, } }); const gop = try mod.memoized_decls.getOrPut(gpa, val); if (!gop.found_existing) { @@ -11478,7 +11480,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError operand_ty.fmt(mod), }); } - for (operand_ty.errorSetNames(mod)) |error_name_ip| { + for (0..operand_ty.errorSetNames(mod).len) |i| { + const error_name_ip = operand_ty.errorSetNames(mod)[i]; const error_name = mod.intern_pool.stringToSlice(error_name_ip); if (seen_errors.contains(error_name)) continue; cases_len += 1; @@ -15851,7 +15854,8 @@ fn zirBuiltinSrc( const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = mod.intern_pool.stringToSlice(fn_owner_decl.name); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, mod.intern_pool.stringToSlice(fn_owner_decl.name)); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -16287,7 +16291,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const error_field_vals = if (ty.isAnyError(mod)) null else blk: { const vals = try sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len); for (vals, 0..) |*field_val, i| { - const name = ip.stringToSlice(ty.errorSetNames(mod)[i]); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(ty.errorSetNames(mod)[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16417,8 +16422,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); for (enum_field_vals, 0..) 
|*field_val, i| { - const name_ip = ip.indexToKey(ty.toIntern()).enum_type.names[i]; - const name = ip.stringToSlice(name_ip); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(ty.toIntern()).enum_type.names[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16556,7 +16561,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai for (union_field_vals, 0..) |*field_val, i| { const field = union_fields.values()[i]; - const name = ip.stringToSlice(union_fields.keys()[i]); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(union_fields.keys()[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16714,9 +16720,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); + // TODO: write something like getCoercedInts to avoid needing to dupe const bytes = if (tuple.names.len != 0) // https://github.com/ziglang/zig/issues/15709 - @as([]const u8, ip.stringToSlice(tuple.names[i])) + try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(struct_ty.toIntern()).anon_struct_type.names[i])) else try std.fmt.allocPrint(sema.arena, "{d}", .{i}); const new_decl_ty = try mod.arrayType(.{ @@ -16771,7 +16778,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai struct_obj.fields.keys(), struct_obj.fields.values(), ) |*field_val, name_nts, field| { - const name = ip.stringToSlice(name_nts); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(name_nts)); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -17020,9 +17028,8 @@ fn typeInfoNamespaceDecls( const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - // Protects the decl name slice from being invalidated at the call to intern(). 
- try ip.string_bytes.ensureUnusedCapacity(sema.gpa, ip.stringToSlice(decl.name).len + 1); - const name = ip.stringToSlice(decl.name); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(decl.name)); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -19060,6 +19067,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }; + // TODO: write something like getCoercedInts to avoid needing to dupe const field_name = enum_ty.enumFieldName(field_index, mod); return sema.addStrLit(block, ip.stringToSlice(field_name)); } @@ -19601,7 +19609,6 @@ fn zirReify( // Tag type const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); var explicit_tags_seen: []bool = &.{}; - var explicit_enum_info: ?InternPool.Key.EnumType = null; var enum_field_names: []InternPool.NullTerminatedString = &.{}; if (tag_type_val.optionalValue(mod)) |payload_val| { union_obj.tag_ty = payload_val.toType(); @@ -19611,7 +19618,6 @@ fn zirReify( else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}), }; - explicit_enum_info = enum_type; explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); @memset(explicit_tags_seen, false); } else { @@ -19640,7 +19646,8 @@ fn zirReify( enum_field_names[i] = field_name; } - if (explicit_enum_info) |tag_info| { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ ip.stringToSlice(field_name), union_obj.tag_ty.fmt(mod) }); @@ -19705,7 +19712,8 @@ fn zirReify( } } - if (explicit_enum_info) |tag_info| { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{}); @@ -31625,17 +31633,17 @@ fn resolvePeerTypes( return chosen_ty; } -pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileError!void { +pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { const mod = sema.mod; - try sema.resolveTypeFully(fn_info.return_type.toType()); + try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.return_type.toType()); - if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.toType().isError(mod)) { + if (mod.comp.bin_file.options.error_return_tracing and mod.typeToFunc(fn_ty).?.return_type.toType().isError(mod)) { // Ensure the type exists so that backends can assume that. 
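The recurring `TODO: write something like getCoercedInts to avoid needing to dupe` in this commit marks call sites where a slice borrowed from the intern pool is passed back into an interning call that may grow the pool and invalidate the slice; the stopgap is to copy first. In miniature, hypothetical code rather than compiler internals:

    const std = @import("std");

    test "dupe a borrowed slice before mutating its owner" {
        const gpa = std.testing.allocator;
        var pool: std.ArrayListUnmanaged(u8) = .{};
        defer pool.deinit(gpa);
        try pool.appendSlice(gpa, "some_name");

        // `pool.items` aliases pool storage and appending may reallocate,
        // so copy the bytes out first, as the dupe calls above do.
        const name = try gpa.dupe(u8, pool.items);
        defer gpa.free(name);

        try pool.appendSlice(gpa, name); // safe: no longer aliasing
        try std.testing.expectEqualStrings("some_namesome_name", pool.items);
    }

The previous approach, visible in the removed lines above, pre-reserved `string_bytes` capacity so the slice could not move; duplicating is the more robust stopgap until a pool-internal helper makes the copy unnecessary.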
_ = try sema.getBuiltinType("StackTrace"); } - for (fn_info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty.toType()); + for (0..mod.typeToFunc(fn_ty).?.param_types.len) |i| { + try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.param_types[i].toType()); } } @@ -33077,7 +33085,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { var enum_field_names: []InternPool.NullTerminatedString = &.{}; var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; var explicit_tags_seen: []bool = &.{}; - var explicit_enum_info: ?InternPool.Key.EnumType = null; if (tag_type_ref != .none) { const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref); @@ -33114,7 +33121,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. - explicit_enum_info = enum_type; explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); @memset(explicit_tags_seen, false); } @@ -33256,7 +33262,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { return sema.failWithOwnedErrorMsg(msg); } - if (explicit_enum_info) |tag_info| { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ @@ -33346,7 +33353,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (explicit_enum_info) |tag_info| { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{}); @@ -33706,9 +33714,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } // In this case the struct has all comptime-known fields and // therefore has one possible value. + // TODO: write something like getCoercedInts to avoid needing to dupe return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), - .storage = .{ .elems = tuple.values }, + .storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values) }, } })).toValue(); }, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 2675d5350a58..a1b57516ee37 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2026,13 +2026,9 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty }); var data_off: i32 = 0; - for ( - exitlude_jump_relocs, - enum_ty.enumFields(mod), - 0.., - ) |*exitlude_jump_reloc, tag_name_ip, index_usize| { + for (exitlude_jump_relocs, 0..) 
|*exitlude_jump_reloc, index_usize| { const index = @intCast(u32, index_usize); - const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); + const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]); const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv); diff --git a/src/codegen.zig b/src/codegen.zig index 77359d78da0a..b39c3c5ec020 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -517,7 +517,7 @@ pub fn generateSymbol( const field_ty = field.ty; if (!field_ty.hasRuntimeBits(mod)) continue; - const field_val = switch (aggregate.storage) { + const field_val = switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).aggregate.storage) { .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes[index] }, diff --git a/src/type.zig b/src/type.zig index 43aaf3c78692..d9ae710b2d1b 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2576,9 +2576,12 @@ pub const Type = struct { } // In this case the struct has all comptime-known fields and // therefore has one possible value. + // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values); + defer mod.gpa.free(duped_values); return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), - .storage = .{ .elems = tuple.values }, + .storage = .{ .elems = duped_values }, } })).toValue(); }, diff --git a/src/value.zig b/src/value.zig index 39586152144b..8ab98bc99425 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1904,6 +1904,7 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { + // TODO: write something like getCoercedInts to avoid needing to dupe return switch (val.ip_index) { .none => switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr.sliceArray(mod, arena, start, end), @@ -1937,8 +1938,8 @@ pub const Value = struct { else => unreachable, }.toIntern(), .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[start..end] }, - .elems => |elems| .{ .elems = elems[start..end] }, + .bytes => .{ .bytes = try arena.dupe(u8, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.bytes[start..end]) }, + .elems => .{ .elems = try arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) }, .repeated_elem => |elem| .{ .repeated_elem = elem }, }, } })).toValue(), From 7c12e064c4e6cd7ea2243a665984e5c49bc94229 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Jun 2023 16:09:59 -0700 Subject: [PATCH 180/205] Sema: reword compile error about LLVM extensions and C import --- src/Sema.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Sema.zig b/src/Sema.zig index 4d9fc201a11e..be354ff3500c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5451,7 +5451,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr // we check this here to avoid undefined symbols if (!@import("build_options").have_llvm) - return sema.fail(parent_block, src, "cannot do C import on Zig compiler not built with LLVM-extension", .{}); + return sema.fail(parent_block, src, "C import unavailable; Zig compiler built without LLVM extensions", .{}); var c_import_buf = std.ArrayList(u8).init(sema.gpa); defer c_import_buf.deinit(); From ab86b2024883f67c0fa06108f66e4e88b98c3163 Mon Sep 17 00:00:00 2001 
From: Andrew Kelley Date: Fri, 2 Jun 2023 17:34:12 -0700 Subject: [PATCH 181/205] std.hash: improve small-key hashing in Wyhash Instead of carrying an optimized version of wyhash in the compiler for small keys, put it into the std lib where it belongs. ...except it does not match the official test cases. This will need to be fixed before merging into master. This is an extremely contributor-friendly task. Related issue: #15916 --- lib/std/hash/benchmark.zig | 10 ---- src/InternPool.zig | 119 +++++++------------------------------ 2 files changed, 22 insertions(+), 107 deletions(-) diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index a3fc6c25748f..cf2f18d22f8a 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -38,16 +38,6 @@ const hashes = [_]Hash{ .name = "wyhash", .init_u64 = 0, }, - Hash{ - .ty = hash.XxHash64, - .name = "xxhash64", - .init_u64 = 0, - }, - Hash{ - .ty = hash.XxHash32, - .name = "xxhash32", - .init_u64 = 0, - }, Hash{ .ty = hash.Fnv1a_64, .name = "fnv1a", diff --git a/src/InternPool.zig b/src/InternPool.zig index bd7028b87947..0363003b3684 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -69,6 +69,7 @@ const assert = std.debug.assert; const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Limb = std.math.big.Limb; +const Hash = std.hash.Wyhash; const InternPool = @This(); const Module = @import("Module.zig"); @@ -675,34 +676,34 @@ pub const Key = union(enum) { .empty_enum_value, .inferred_error_set_type, .un, - => |x| WyhashKing.hash(seed, asBytes(&x)), + => |x| Hash.hash(seed, asBytes(&x)), - .int_type => |x| WyhashKing.hash(seed + @enumToInt(x.signedness), asBytes(&x.bits)), - .union_type => |x| WyhashKing.hash(seed + @enumToInt(x.runtime_tag), asBytes(&x.index)), + .int_type => |x| Hash.hash(seed + @enumToInt(x.signedness), asBytes(&x.bits)), + .union_type => |x| Hash.hash(seed + @enumToInt(x.runtime_tag), asBytes(&x.index)), .error_union => |x| switch (x.val) { - .err_name => |y| WyhashKing.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)), - .payload => |y| WyhashKing.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)), + .err_name => |y| Hash.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)), + .payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)), }, - .runtime_value => |x| WyhashKing.hash(seed, asBytes(&x.val)), - .opaque_type => |x| WyhashKing.hash(seed, asBytes(&x.decl)), + .runtime_value => |x| Hash.hash(seed, asBytes(&x.val)), + .opaque_type => |x| Hash.hash(seed, asBytes(&x.decl)), .enum_type => |enum_type| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); std.hash.autoHash(&hasher, enum_type.decl); return hasher.final(); }, .variable => |variable| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); std.hash.autoHash(&hasher, variable.decl); return hasher.final(); }, - .extern_func => |x| WyhashKing.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)), + .extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)), .int => |int| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); // Canonicalize all integers by converting them to BigIntConst. 
switch (int.storage) { .u64, .i64, .big_int => { @@ -725,7 +726,7 @@ pub const Key = union(enum) { }, .float => |float| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); std.hash.autoHash(&hasher, float.ty); switch (float.storage) { inline else => |val| std.hash.autoHash( @@ -743,19 +744,19 @@ pub const Key = union(enum) { const seed2 = seed + @enumToInt(addr); const common = asBytes(&ptr.ty) ++ asBytes(&ptr.len); return switch (ptr.addr) { - .decl => |x| WyhashKing.hash(seed2, common ++ asBytes(&x)), + .decl => |x| Hash.hash(seed2, common ++ asBytes(&x)), - .mut_decl => |x| WyhashKing.hash( + .mut_decl => |x| Hash.hash( seed2, asBytes(&x.decl) ++ asBytes(&x.runtime_index), ), - .int, .eu_payload, .opt_payload, .comptime_field => |int| WyhashKing.hash( + .int, .eu_payload, .opt_payload, .comptime_field => |int| Hash.hash( seed2, asBytes(&int), ), - .elem, .field => |x| WyhashKing.hash( + .elem, .field => |x| Hash.hash( seed2, asBytes(&x.base) ++ asBytes(&x.index), ), @@ -763,7 +764,7 @@ pub const Key = union(enum) { }, .aggregate => |aggregate| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); std.hash.autoHash(&hasher, aggregate.ty); const len = ip.aggregateTypeLen(aggregate.ty); const child = switch (ip.indexToKey(aggregate.ty)) { @@ -823,13 +824,13 @@ pub const Key = union(enum) { }, .error_set_type => |error_set_type| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); for (error_set_type.names) |elem| std.hash.autoHash(&hasher, elem); return hasher.final(); }, .anon_struct_type => |anon_struct_type| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); for (anon_struct_type.types) |elem| std.hash.autoHash(&hasher, elem); for (anon_struct_type.values) |elem| std.hash.autoHash(&hasher, elem); for (anon_struct_type.names) |elem| std.hash.autoHash(&hasher, elem); @@ -837,7 +838,7 @@ pub const Key = union(enum) { }, .func_type => |func_type| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); for (func_type.param_types) |param_type| std.hash.autoHash(&hasher, param_type); std.hash.autoHash(&hasher, func_type.return_type); std.hash.autoHash(&hasher, func_type.comptime_bits); @@ -851,7 +852,7 @@ pub const Key = union(enum) { }, .memoized_call => |memoized_call| { - var hasher = std.hash.Wyhash.init(seed); + var hasher = Hash.init(seed); std.hash.autoHash(&hasher, memoized_call.func); for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg); return hasher.final(); @@ -5744,79 +5745,3 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .none => unreachable, // special tag }; } - -/// I got this from King, using this temporarily until std lib hashing can be -/// improved to make stateless hashing performant. Currently the -/// implementations suffer from not special casing small lengths and not taking -/// advantage of comptime-known lengths, both of which this implementation -/// does. 
-const WyhashKing = struct { - inline fn mum(pair: *[2]u64) void { - const x = @as(u128, pair[0]) *% pair[1]; - pair[0] = @truncate(u64, x); - pair[1] = @truncate(u64, x >> 64); - } - - inline fn mix(a: u64, b: u64) u64 { - var pair = [_]u64{ a, b }; - mum(&pair); - return pair[0] ^ pair[1]; - } - - inline fn read(comptime I: type, in: []const u8) I { - return std.mem.readIntLittle(I, in[0..@sizeOf(I)]); - } - - const secret = [_]u64{ - 0xa0761d6478bd642f, - 0xe7037ed1a0b428db, - 0x8ebc6af09c88c6e3, - 0x589965cc75374cc3, - }; - - fn hash(seed: u64, input: anytype) u64 { - var in: []const u8 = input; - var last = std.mem.zeroes([2]u64); - const starting_len: u64 = input.len; - var state = seed ^ mix(seed ^ secret[0], secret[1]); - - if (in.len <= 16) { - if (in.len >= 4) { - const end = (in.len >> 3) << 2; - last[0] = (@as(u64, read(u32, in)) << 32) | read(u32, in[end..]); - last[1] = (@as(u64, read(u32, in[in.len - 4 ..])) << 32) | read(u32, in[in.len - 4 - end ..]); - } else if (in.len > 0) { - last[0] = (@as(u64, in[0]) << 16) | (@as(u64, in[in.len >> 1]) << 8) | in[in.len - 1]; - } - } else { - large: { - if (in.len <= 48) break :large; - var split = [_]u64{ state, state, state }; - while (true) { - for (&split, 0..) |*lane, i| { - const a = read(u64, in[(i * 2) * 8 ..]) ^ secret[i + 1]; - const b = read(u64, in[((i * 2) + 1) * 8 ..]) ^ lane.*; - lane.* = mix(a, b); - } - in = in[48..]; - if (in.len > 48) continue; - state = split[0] ^ (split[1] ^ split[2]); - break :large; - } - } - while (true) { - if (in.len <= 16) break; - state = mix(read(u64, in) ^ secret[1], read(u64, in[8..]) ^ state); - in = in[16..]; - if (in.len <= 16) break; - } - last[0] = read(u64, in[in.len - 16 ..]); - last[1] = read(u64, in[in.len - 8 ..]); - } - - last[0] ^= secret[1]; - last[1] ^= state; - mum(&last); - return mix(last[0] ^ secret[0] ^ starting_len, last[1] ^ secret[1]); - } -}; From 2a6b91874ae970c0fba63f8c1357da5a57feec27 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 3 Jun 2023 15:46:16 +0100 Subject: [PATCH 182/205] stage2: pass most test cases under InternPool All but 2 test cases now pass (tested on x86_64 Linux, native only). The remaining two signify an issue requiring a larger refactor, which I will do in a separate commit. Notable changes: * Fix uninitialized memory when allocating objects from free lists * Implement TypedValue printing for pointers * Fix some TypedValue printing logic * Work around non-existence of InternPool.remove implementation --- src/InternPool.zig | 41 +++-- src/Module.zig | 8 +- src/Sema.zig | 57 ++++--- src/TypedValue.zig | 143 +++++++++++++++++- src/type.zig | 3 + ...ccess_non-existent_member_of_error_set.zig | 1 - ...ction_which_must_be_comptime_evaluated.zig | 2 +- ..._known_at_comptime_violates_error_sets.zig | 7 +- ...mplicit_cast_of_error_set_not_a_subset.zig | 6 +- .../int_to_err_non_global_invalid_number.zig | 3 +- .../invalid_non-exhaustive_enum_to_union.zig | 2 +- ..._when_coercing_pointer_to_anon_literal.zig | 4 +- .../return_invalid_type_from_test.zig | 6 +- ...n_invalid_value_of_non-exhaustive_enum.zig | 4 +- .../compile_errors/tuple_init_edge_cases.zig | 2 +- ...type_mismatch_with_tuple_concatenation.zig | 2 +- 16 files changed, 233 insertions(+), 58 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 0363003b3684..a46f765ad5bb 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -400,14 +400,21 @@ pub const Key = union(enum) { /// integer tag type of the enum. 
pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 { assert(tag_val != .none); + // TODO: we should probably decide a single interface for this function, but currently + // it's being called with both tag values and underlying ints. Fix this! + const int_tag_val = switch (ip.indexToKey(tag_val)) { + .enum_tag => |enum_tag| enum_tag.int, + .int => tag_val, + else => unreachable, + }; if (self.values_map.unwrap()) |values_map| { const map = &ip.maps.items[@enumToInt(values_map)]; const adapter: Index.Adapter = .{ .indexes = self.values }; - const field_index = map.getIndexAdapted(tag_val, adapter) orelse return null; + const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null; return @intCast(u32, field_index); } - // Auto-numbered enum. Convert `tag_val` to field index. - switch (ip.indexToKey(tag_val).int.storage) { + // Auto-numbered enum. Convert `int_tag_val` to field index. + switch (ip.indexToKey(int_tag_val).int.storage) { .u64 => |x| { if (x >= self.names.len) return null; return @intCast(u32, x); @@ -4261,12 +4268,8 @@ fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { /// This operation only happens under compile error conditions. /// Leak the index until the next garbage collection. -pub fn remove(ip: *InternPool, index: Index) void { - _ = ip; - _ = index; - @setCold(true); - @panic("TODO this is a bit problematic to implement, could we maybe just never support a remove() operation on InternPool?"); -} +/// TODO: this is a bit problematic to implement, can we get away without it? +pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead"); fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { const limbs_len = @intCast(u32, limbs.len); @@ -5161,7 +5164,10 @@ pub fn createStruct( gpa: Allocator, initialization: Module.Struct, ) Allocator.Error!Module.Struct.Index { - if (ip.structs_free_list.popOrNull()) |index| return index; + if (ip.structs_free_list.popOrNull()) |index| { + ip.allocated_structs.at(@enumToInt(index)).* = initialization; + return index; + } const ptr = try ip.allocated_structs.addOne(gpa); ptr.* = initialization; return @intToEnum(Module.Struct.Index, ip.allocated_structs.len - 1); @@ -5180,7 +5186,10 @@ pub fn createUnion( gpa: Allocator, initialization: Module.Union, ) Allocator.Error!Module.Union.Index { - if (ip.unions_free_list.popOrNull()) |index| return index; + if (ip.unions_free_list.popOrNull()) |index| { + ip.allocated_unions.at(@enumToInt(index)).* = initialization; + return index; + } const ptr = try ip.allocated_unions.addOne(gpa); ptr.* = initialization; return @intToEnum(Module.Union.Index, ip.allocated_unions.len - 1); @@ -5199,7 +5208,10 @@ pub fn createFunc( gpa: Allocator, initialization: Module.Fn, ) Allocator.Error!Module.Fn.Index { - if (ip.funcs_free_list.popOrNull()) |index| return index; + if (ip.funcs_free_list.popOrNull()) |index| { + ip.allocated_funcs.at(@enumToInt(index)).* = initialization; + return index; + } const ptr = try ip.allocated_funcs.addOne(gpa); ptr.* = initialization; return @intToEnum(Module.Fn.Index, ip.allocated_funcs.len - 1); @@ -5218,7 +5230,10 @@ pub fn createInferredErrorSet( gpa: Allocator, initialization: Module.Fn.InferredErrorSet, ) Allocator.Error!Module.Fn.InferredErrorSet.Index { - if (ip.inferred_error_sets_free_list.popOrNull()) |index| return index; + if (ip.inferred_error_sets_free_list.popOrNull()) |index| { + 
ip.allocated_inferred_error_sets.at(@enumToInt(index)).* = initialization; + return index; + } const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); ptr.* = initialization; return @intToEnum(Module.Fn.InferredErrorSet.Index, ip.allocated_inferred_error_sets.len - 1); diff --git a/src/Module.zig b/src/Module.zig index c1d6b8157a4e..cb3e8884e309 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4374,7 +4374,8 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .index = struct_index.toOptional(), .namespace = new_namespace_index.toOptional(), } }); - errdefer mod.intern_pool.remove(struct_ty); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(struct_ty); new_namespace.ty = struct_ty.toType(); file.root_decl = new_decl_index.toOptional(); @@ -5682,7 +5683,10 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { } pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { - if (mod.namespaces_free_list.popOrNull()) |index| return index; + if (mod.namespaces_free_list.popOrNull()) |index| { + mod.allocated_namespaces.at(@enumToInt(index)).* = initialization; + return index; + } const ptr = try mod.allocated_namespaces.addOne(mod.gpa); ptr.* = initialization; return @intToEnum(Namespace.Index, mod.allocated_namespaces.len - 1); diff --git a/src/Sema.zig b/src/Sema.zig index be354ff3500c..715c63c77c9a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2801,7 +2801,8 @@ fn zirStructDecl( .index = struct_index.toOptional(), .namespace = new_namespace_index.toOptional(), } }); - errdefer mod.intern_pool.remove(struct_ty); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(struct_ty); new_decl.val = struct_ty.toValue(); new_namespace.ty = struct_ty.toType(); @@ -3012,7 +3013,8 @@ fn zirEnumDecl( else .explicit, }); - errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index); + // TODO: figure out InternPool removals for incremental compilation + //errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index); new_decl.val = incomplete_enum.index.toValue(); new_namespace.ty = incomplete_enum.index.toType(); @@ -3260,7 +3262,8 @@ fn zirUnionDecl( .ReleaseFast, .ReleaseSmall => .none, }, } }); - errdefer mod.intern_pool.remove(union_ty); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(union_ty); new_decl.val = union_ty.toValue(); new_namespace.ty = union_ty.toType(); @@ -3321,7 +3324,8 @@ fn zirOpaqueDecl( .decl = new_decl_index, .namespace = new_namespace_index, } }); - errdefer mod.intern_pool.remove(opaque_ty); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(opaque_ty); new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); @@ -19424,7 +19428,10 @@ fn zirReify( }, name_strategy, "enum", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer { + new_decl.has_tv = false; // namespace and val were destroyed by later errdefers + mod.abortAnonDecl(new_decl_index); + } // Define our empty enum decl const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod))); @@ -19439,7 +19446,8 @@ fn zirReify( .explicit, .tag_ty = int_tag_ty.toIntern(), }); - errdefer ip.remove(incomplete_enum.index); + // TODO: figure out InternPool removals for incremental compilation + //errdefer 
ip.remove(incomplete_enum.index); new_decl.val = incomplete_enum.index.toValue(); @@ -19514,7 +19522,10 @@ fn zirReify( }, name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer { + new_decl.has_tv = false; // namespace and val were destroyed by later errdefers + mod.abortAnonDecl(new_decl_index); + } const new_namespace_index = try mod.createNamespace(.{ .parent = block.namespace.toOptional(), @@ -19528,7 +19539,8 @@ fn zirReify( .decl = new_decl_index, .namespace = new_namespace_index, } }); - errdefer ip.remove(opaque_ty); + // TODO: figure out InternPool removals for incremental compilation + //errdefer ip.remove(opaque_ty); new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); @@ -19568,7 +19580,10 @@ fn zirReify( }, name_strategy, "union", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer { + new_decl.has_tv = false; // namespace and val were destroyed by later errdefers + mod.abortAnonDecl(new_decl_index); + } const new_namespace_index = try mod.createNamespace(.{ .parent = block.namespace.toOptional(), @@ -19601,7 +19616,8 @@ fn zirReify( .ReleaseFast, .ReleaseSmall => .none, }, } }); - errdefer ip.remove(union_ty); + // TODO: figure out InternPool removals for incremental compilation + //errdefer ip.remove(union_ty); new_decl.val = union_ty.toValue(); new_namespace.ty = union_ty.toType(); @@ -19865,7 +19881,10 @@ fn reifyStruct( }, name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); + errdefer { + new_decl.has_tv = false; // namespace and val were destroyed by later errdefers + mod.abortAnonDecl(new_decl_index); + } const new_namespace_index = try mod.createNamespace(.{ .parent = block.namespace.toOptional(), @@ -19892,7 +19911,8 @@ fn reifyStruct( .index = struct_index.toOptional(), .namespace = new_namespace_index.toOptional(), } }); - errdefer ip.remove(struct_ty); + // TODO: figure out InternPool removals for incremental compilation + //errdefer ip.remove(struct_ty); new_decl.val = struct_ty.toValue(); new_namespace.ty = struct_ty.toType(); @@ -27515,8 +27535,8 @@ fn coerceInMemoryAllowedFns( if (rt != .ok) { return InMemoryCoercionResult{ .fn_return_type = .{ .child = try rt.dupe(sema.arena), - .actual = dest_return_type, - .wanted = src_return_type, + .actual = src_return_type, + .wanted = dest_return_type, } }; } }, @@ -29505,7 +29525,8 @@ fn coerceTupleToStruct( .ty = struct_ty.toIntern(), .storage = .{ .elems = field_vals }, } }); - errdefer ip.remove(struct_val); + // TODO: figure out InternPool removals for incremental compilation + //errdefer ip.remove(struct_val); return sema.addConstant(struct_ty, struct_val.toValue()); } @@ -34666,14 +34687,14 @@ fn floatToIntScalar( var big_int = try float128IntPartToBigInt(sema.arena, float); defer big_int.deinit(); - const result = try mod.intValue_big(int_ty, big_int.toConst()); + const cti_result = try mod.intValue_big(Type.comptime_int, big_int.toConst()); - if (!(try sema.intFitsInType(result, int_ty, null))) { + if (!(try sema.intFitsInType(cti_result, int_ty, null))) { return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{ val.fmtValue(float_ty, sema.mod), int_ty.fmt(sema.mod), }); } - return result; + return mod.getCoerced(cti_result, int_ty); } /// Asserts the value is an integer, and 
the destination type is ComptimeInt or Int. diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 0d771aa18428..723207eeffbe 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -203,7 +203,7 @@ pub fn print( .extern_func => |extern_func| return writer.print("(extern function '{s}')", .{ mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name), }), - .func => |func| return writer.print("(function '{d}')", .{ + .func => |func| return writer.print("(function '{s}')", .{ mod.intern_pool.stringToSlice(mod.declPtr(mod.funcPtr(func.index).owner_decl).name), }), .int => |int| switch (int.storage) { @@ -234,7 +234,12 @@ pub fn print( if (level == 0) { return writer.writeAll("(enum)"); } - + const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; + if (enum_type.tagValueIndex(&mod.intern_pool, val.toIntern())) |tag_index| { + const tag_name = mod.intern_pool.stringToSlice(enum_type.names[tag_index]); + try writer.print(".{}", .{std.zig.fmtId(tag_name)}); + return; + } try writer.writeAll("@intToEnum("); try print(.{ .ty = Type.type, @@ -250,9 +255,129 @@ pub fn print( }, .empty_enum_value => return writer.writeAll("(empty enum value)"), .float => |float| switch (float.storage) { - inline else => |x| return writer.print("{}", .{x}), + inline else => |x| return writer.print("{d}", .{@floatCast(f64, x)}), + }, + .ptr => |ptr| { + if (ptr.addr == .int) { + const i = mod.intern_pool.indexToKey(ptr.addr.int).int; + switch (i.storage) { + inline else => |addr| return writer.print("{x:0>8}", .{addr}), + } + } + + const ptr_ty = mod.intern_pool.indexToKey(ty.toIntern()).ptr_type; + if (ptr_ty.flags.size == .Slice) { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + const elem_ty = ptr_ty.child.toType(); + const len = ptr.len.toValue().toUnsignedInt(mod); + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @min(len, max_string_len); + var buf: [max_string_len]u8 = undefined; + for (buf[0..max_len], 0..) 
|*c, i| { + const elem = try val.elemValue(mod, i); + if (elem.isUndef(mod)) break :str; + c.* = @intCast(u8, elem.toUnsignedInt(mod)); + } + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + try writer.writeAll(".{ "); + const max_len = @min(len, max_aggregate_items); + for (0..max_len) |i| { + if (i != 0) try writer.writeAll(", "); + try print(.{ + .ty = elem_ty, + .val = try val.elemValue(mod, i), + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + } + + switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + if (level == 0) return writer.print("(decl '{s}')", .{mod.intern_pool.stringToSlice(decl.name)}); + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + if (level == 0) return writer.print("(mut decl '{s}')", .{mod.intern_pool.stringToSlice(decl.name)}); + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .comptime_field => |field_val_ip| { + return print(.{ + .ty = mod.intern_pool.typeOf(field_val_ip).toType(), + .val = field_val_ip.toValue(), + }, writer, level - 1, mod); + }, + .int => unreachable, + .eu_payload => |eu_ip| { + try writer.writeAll("(payload of "); + try print(.{ + .ty = mod.intern_pool.typeOf(eu_ip).toType(), + .val = eu_ip.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(")"); + }, + .opt_payload => |opt_ip| { + try print(.{ + .ty = mod.intern_pool.typeOf(opt_ip).toType(), + .val = opt_ip.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(".?"); + }, + .elem => |elem| { + try print(.{ + .ty = mod.intern_pool.typeOf(elem.base).toType(), + .val = elem.base.toValue(), + }, writer, level - 1, mod); + try writer.print("[{}]", .{elem.index}); + }, + .field => |field| { + const container_ty = mod.intern_pool.typeOf(field.base).toType(); + try print(.{ + .ty = container_ty, + .val = field.base.toValue(), + }, writer, level - 1, mod); + + switch (container_ty.zigTypeTag(mod)) { + .Struct => { + if (container_ty.isTuple(mod)) { + try writer.print("[{d}]", .{field.index}); + } + const field_name_ip = container_ty.structFieldName(field.index, mod); + const field_name = mod.intern_pool.stringToSlice(field_name_ip); + try writer.print(".{}", .{std.zig.fmtId(field_name)}); + }, + .Union => { + const field_name_ip = container_ty.unionFields(mod).keys()[field.index]; + const field_name = mod.intern_pool.stringToSlice(field_name_ip); + try writer.print(".{}", .{std.zig.fmtId(field_name)}); + }, + .Pointer => { + std.debug.assert(container_ty.isSlice(mod)); + try writer.writeAll(switch (field.index) { + Value.slice_ptr_index => ".ptr", + Value.slice_len_index => ".len", + else => unreachable, + }); + }, + else => unreachable, + } + }, + } }, - .ptr => return writer.writeAll("(ptr)"), .opt => |opt| switch (opt.val) { .none => return writer.writeAll("null"), else => |payload| { @@ -261,7 +386,15 @@ pub fn print( }, }, .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => |bytes| return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}), + .bytes => |bytes| { + // Strip the 0 sentinel off of strings before printing + const zero_sent = blk: { + const sent = ty.sentinel(mod) orelse break :blk false; + break :blk sent.eql(Value.zero_u8, Type.u8, mod); + }; + const str = if 
(zero_sent) bytes[0..bytes.len - 1] else bytes; + return writer.print("\"{}\"", .{std.zig.fmtEscapes(str)}); + }, .elems, .repeated_elem => return printAggregate(ty, val, writer, level, mod), }, .un => |un| { diff --git a/src/type.zig b/src/type.zig index d9ae710b2d1b..a6997bc59068 100644 --- a/src/type.zig +++ b/src/type.zig @@ -345,6 +345,9 @@ pub const Type = struct { } }, .anon_struct_type => |anon_struct| { + if (anon_struct.types.len == 0) { + return writer.writeAll("@TypeOf(.{})"); + } try writer.writeAll("struct{"); for (anon_struct.types, anon_struct.values, 0..) |field_ty, val, i| { if (i != 0) try writer.writeAll(", "); diff --git a/test/cases/compile_errors/access_non-existent_member_of_error_set.zig b/test/cases/compile_errors/access_non-existent_member_of_error_set.zig index 765bbe59c3c1..7f0bc562ac39 100644 --- a/test/cases/compile_errors/access_non-existent_member_of_error_set.zig +++ b/test/cases/compile_errors/access_non-existent_member_of_error_set.zig @@ -9,4 +9,3 @@ comptime { // target=native // // :3:18: error: no error named 'Bar' in 'error{A}' -// :1:13: note: error set declared here diff --git a/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig b/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig index 8a39fdec462b..64fae2aac38f 100644 --- a/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig +++ b/test/cases/compile_errors/compile_log_statement_inside_function_which_must_be_comptime_evaluated.zig @@ -14,4 +14,4 @@ export fn entry() void { // :2:5: error: found compile log statement // // Compile Log Output: -// @as(*const [3:0]u8, "i32\x00") +// @as(*const [3:0]u8, "i32") diff --git a/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig b/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig index 22fc965769e5..a3af883198fd 100644 --- a/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig +++ b/test/cases/compile_errors/explicit_error_set_cast_known_at_comptime_violates_error_sets.zig @@ -1,5 +1,5 @@ -const Set1 = error {A, B}; -const Set2 = error {A, C}; +const Set1 = error{ A, B }; +const Set2 = error{ A, C }; comptime { var x = Set1.B; var y = @errSetCast(Set2, x); @@ -10,5 +10,4 @@ comptime { // backend=stage2 // target=native // -// :5:13: error: 'error.B' not a member of error set 'error{A,C}' -// :2:14: note: error set declared here +// :5:13: error: 'error.B' not a member of error set 'error{C,A}' diff --git a/test/cases/compile_errors/implicit_cast_of_error_set_not_a_subset.zig b/test/cases/compile_errors/implicit_cast_of_error_set_not_a_subset.zig index 0a182343b99a..5e5b57680c80 100644 --- a/test/cases/compile_errors/implicit_cast_of_error_set_not_a_subset.zig +++ b/test/cases/compile_errors/implicit_cast_of_error_set_not_a_subset.zig @@ -1,5 +1,5 @@ -const Set1 = error{A, B}; -const Set2 = error{A, C}; +const Set1 = error{ A, B }; +const Set2 = error{ A, C }; export fn entry() void { foo(Set1.B); } @@ -12,5 +12,5 @@ fn foo(set1: Set1) void { // backend=stage2 // target=native // -// :7:19: error: expected type 'error{A,C}', found 'error{A,B}' +// :7:19: error: expected type 'error{C,A}', found 'error{A,B}' // :7:19: note: 'error.B' not a member of destination error set diff --git a/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig 
b/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig index 43aad76f4599..f837ccd532e6 100644 --- a/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig +++ b/test/cases/compile_errors/int_to_err_non_global_invalid_number.zig @@ -16,5 +16,4 @@ comptime { // backend=llvm // target=native // -// :11:13: error: 'error.B' not a member of error set 'error{A,C}' -// :5:14: note: error set declared here +// :11:13: error: 'error.B' not a member of error set 'error{C,A}' diff --git a/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig b/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig index c7fc39f7693b..fa58c0845a4a 100644 --- a/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig +++ b/test/cases/compile_errors/invalid_non-exhaustive_enum_to_union.zig @@ -24,5 +24,5 @@ export fn bar() void { // // :12:16: error: runtime coercion to union 'tmp.U' from non-exhaustive enum // :1:11: note: enum declared here -// :17:16: error: union 'tmp.U' has no tag with value '15' +// :17:16: error: union 'tmp.U' has no tag with value '@intToEnum(tmp.E, 15)' // :6:11: note: union declared here diff --git a/test/cases/compile_errors/pointer_attributes_checked_when_coercing_pointer_to_anon_literal.zig b/test/cases/compile_errors/pointer_attributes_checked_when_coercing_pointer_to_anon_literal.zig index c0a0b06af0f9..da7f2492d133 100644 --- a/test/cases/compile_errors/pointer_attributes_checked_when_coercing_pointer_to_anon_literal.zig +++ b/test/cases/compile_errors/pointer_attributes_checked_when_coercing_pointer_to_anon_literal.zig @@ -16,9 +16,9 @@ comptime { // backend=stage2 // target=native // -// :2:29: error: expected type '[][]const u8', found '*const tuple{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}' +// :2:29: error: expected type '[][]const u8', found '*const struct{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}' // :2:29: note: cast discards const qualifier -// :6:31: error: expected type '*[2][]const u8', found '*const tuple{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}' +// :6:31: error: expected type '*[2][]const u8', found '*const struct{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}' // :6:31: note: cast discards const qualifier // :11:19: error: expected type '*tmp.S', found '*const struct{comptime a: comptime_int = 2}' // :11:19: note: cast discards const qualifier diff --git a/test/cases/compile_errors/return_invalid_type_from_test.zig b/test/cases/compile_errors/return_invalid_type_from_test.zig index a954bd7ee569..acc932cb0a23 100644 --- a/test/cases/compile_errors/return_invalid_type_from_test.zig +++ b/test/cases/compile_errors/return_invalid_type_from_test.zig @@ -1,8 +1,10 @@ -test "example" { return 1; } +test "example" { + return 1; +} // error // backend=stage2 // target=native // is_test=1 // -// :1:25: error: expected type '@typeInfo(@typeInfo(@TypeOf(tmp.test.example)).Fn.return_type.?).ErrorUnion.error_set!void', found 'comptime_int' \ No newline at end of file +// :2:12: error: expected type 'anyerror!void', found 'comptime_int' diff --git a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig index 1de0d1c14501..3523a360549f 100644 --- a/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig +++ b/test/cases/compile_errors/tagName_on_invalid_value_of_non-exhaustive_enum.zig @@ -1,5 
+1,5 @@
 test "enum" {
-    const E = enum(u8) {A, B, _};
+    const E = enum(u8) { A, B, _ };
     _ = @tagName(@intToEnum(E, 5));
 }
 
@@ -8,5 +8,5 @@ test "enum" {
 // target=native
 // is_test=1
 //
-// :3:9: error: no field with value '5' in enum 'test.enum.E'
+// :3:9: error: no field with value '@intToEnum(tmp.test.enum.E, 5)' in enum 'test.enum.E'
 // :2:15: note: declared here
diff --git a/test/cases/compile_errors/tuple_init_edge_cases.zig b/test/cases/compile_errors/tuple_init_edge_cases.zig
index 32b52cdc1f0f..f093515a3867 100644
--- a/test/cases/compile_errors/tuple_init_edge_cases.zig
+++ b/test/cases/compile_errors/tuple_init_edge_cases.zig
@@ -41,4 +41,4 @@ pub export fn entry5() void {
 // :12:14: error: missing tuple field with index 1
 // :17:14: error: missing tuple field with index 1
 // :29:14: error: expected at most 2 tuple fields; found 3
-// :34:30: error: index '2' out of bounds of tuple 'tuple{comptime comptime_int = 123, u32}'
+// :34:30: error: index '2' out of bounds of tuple 'struct{comptime comptime_int = 123, u32}'
diff --git a/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig b/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig
index 284d3c0d0d8a..9f360e2afe7f 100644
--- a/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig
+++ b/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig
@@ -7,4 +7,4 @@ export fn entry() void {
 // backend=stage2
 // target=native
 //
-// :3:11: error: expected type '@TypeOf(.{})', found 'tuple{comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3}'
+// :3:11: error: expected type '@TypeOf(.{})', found 'struct{comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3}'
From 7702af5eb2d986d46b6978dafcf4b174313167e4 Mon Sep 17 00:00:00 2001
From: mlugg
Date: Sat, 3 Jun 2023 18:22:43 +0100
Subject: [PATCH 183/205] Sema: fix int arithmetic overflow checks

Previously, these checks worked by performing the arithmetic operation,
then checking whether the result fit in the type in question. Since all
values are now typed, this approach was no longer valid, and was
tripping some assertions due to trying to store too-large values in
smaller types.

Now, `intAdd`, `intSub`, `intMul` and `intDiv` all check for overflow;
if it happens, they re-do the operation with the result being a
`comptime_int` and report the overflow (and vector index) to the caller
so that a compile error can be emitted.

After this change, all test cases are passing.
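
For illustration, a minimal standalone sketch of that fallback pattern
(hypothetical helper names only, not the actual Sema/Value API, which
threads Module and InternPool state through every call):

    const std = @import("std");

    /// Sketch: try the add in the target type T; on overflow, fall back
    /// to a wider exact result (standing in for `comptime_int`) and flag
    /// the caller, who can then emit the integer-overflow compile error.
    fn addOrExact(comptime T: type, lhs: T, rhs: T, overflowed: *bool) i128 {
        if (std.math.add(T, lhs, rhs)) |sum| {
            overflowed.* = false;
            return sum;
        } else |_| {
            overflowed.* = true;
            // Exact arithmetic, as comptime_int would provide.
            return @as(i128, lhs) + @as(i128, rhs);
        }
    }

    test "overflow is reported and the exact value survives" {
        var overflowed: bool = undefined;
        const exact = addOrExact(i8, 100, 100, &overflowed);
        try std.testing.expect(overflowed);
        try std.testing.expectEqual(@as(i128, 200), exact);
    }

The real `intAddInner`/`intSubInner`/`intMulInner`/`intDivInner` below
additionally handle vectors, recording which element overflowed so the
error message can point at it.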
--- src/Sema.zig | 184 +++++++++++++++++++++++++++++++++++--------------- src/value.zig | 77 +++++++++++++++++++-- 2 files changed, 200 insertions(+), 61 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 715c63c77c9a..e4b8f84135d6 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3125,11 +3125,11 @@ fn zirEnumDecl( return sema.failWithOwnedErrorMsg(msg); } - if (has_tag_value) { + const tag_overflow = if (has_tag_value) overflow: { const tag_val_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const tag_inst = try sema.resolveInst(tag_val_ref); - const tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { + last_tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { error.NeededSourceLocation => { const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, @@ -3140,43 +3140,50 @@ fn zirEnumDecl( }, else => |e| return e, }; - last_tag_val = tag_val; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.toIntern())) |other_index| { + if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; + last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty); + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| { const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, }).lazy; const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{tag_val.fmtValue(int_tag_ty, sema.mod)}); + const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - } else if (any_values) { - const tag_val = if (last_tag_val) |val| - try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty) + break :overflow false; + } else if (any_values) overflow: { + var overflow: ?usize = null; + last_tag_val = if (last_tag_val) |val| + try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty, &overflow) else try mod.intValue(int_tag_ty, 0); - last_tag_val = tag_val; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.toIntern())) |other_index| { + if (overflow != null) break :overflow true; + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| { const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{tag_val.fmtValue(int_tag_ty, sema.mod)}); + const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - } else { - last_tag_val = try mod.intValue(int_tag_ty, field_i); - } + break :overflow false; + } else overflow: { + last_tag_val = try mod.intValue(Type.comptime_int, field_i); + if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; + last_tag_val = try 
mod.getCoerced(last_tag_val.?, int_tag_ty); + break :overflow false; + }; - if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) { + if (tag_overflow) { const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = if (has_tag_value) .value else .name, @@ -9692,7 +9699,7 @@ fn intCast( const dest_range_val = if (wanted_info.signedness == .signed) range_val: { const one = try mod.intValue(unsigned_operand_ty, 1); const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod); - break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty); + break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined); } else try mod.getCoerced(dest_max_val, unsigned_operand_ty); const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); @@ -11229,7 +11236,10 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({ // Previous validation has resolved any possible lazy values. - item = try sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty); + item = sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) { + error.Overflow => unreachable, + else => |e| return e, + }; }) { cases_len += 1; @@ -13363,10 +13373,10 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_rhs_val) |rhs_val| { if (is_int) { - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); + var overflow_idx: ?usize = null; + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return sema.addConstant(resolved_type, res); } else { @@ -13490,10 +13500,10 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!(modulus_val.compareAllWithZero(.eq, mod))) { return sema.fail(block, src, "exact division produced remainder", .{}); } - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); + var overflow_idx: ?usize = null; + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return sema.addConstant(resolved_type, res); } else { @@ -13785,10 +13795,10 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_rhs_val) |rhs_val| { if (is_int) { - const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index); + var overflow_idx: ?usize = null; + const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx); } return 
sema.addConstant(resolved_type, res); } else { @@ -14651,10 +14661,10 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { if (is_int) { - const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(sum, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vector_index); + var overflow_idx: ?usize = null; + const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type, &overflow_idx); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vec_idx); } return sema.addConstant(resolved_type, sum); } else { @@ -14709,7 +14719,7 @@ fn analyzeArithmetic( } if (maybe_lhs_val) |lhs_val| { const val = if (scalar_tag == .ComptimeInt) - try sema.intAdd(lhs_val, rhs_val, resolved_type) + try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined) else try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod); @@ -14748,10 +14758,10 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { if (is_int) { - const diff = try sema.intSub(lhs_val, rhs_val, resolved_type); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(diff, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vector_index); + var overflow_idx: ?usize = null; + const diff = try sema.intSub(lhs_val, rhs_val, resolved_type, &overflow_idx); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vec_idx); } return sema.addConstant(resolved_type, diff); } else { @@ -14806,7 +14816,7 @@ fn analyzeArithmetic( } if (maybe_rhs_val) |rhs_val| { const val = if (scalar_tag == .ComptimeInt) - try sema.intSub(lhs_val, rhs_val, resolved_type) + try sema.intSub(lhs_val, rhs_val, resolved_type, undefined) else try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod); @@ -14901,10 +14911,10 @@ fn analyzeArithmetic( } } if (is_int) { - const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(product, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, product, vector_index); + var overflow_idx: ?usize = null; + const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, sema.mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx); } return sema.addConstant(resolved_type, product); } else { @@ -15008,7 +15018,7 @@ fn analyzeArithmetic( } const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod) + try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, sema.mod) else try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, sema.mod); @@ -33117,7 +33127,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (fields_len > 0) { - const field_count_val = try mod.intValue(int_tag_ty, fields_len - 1); + const field_count_val = try mod.intValue(Type.comptime_int, fields_len - 1); if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{}); @@ -33217,7 +33227,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk val; } else blk: { const val = if (last_tag_val) 
|val| - try sema.intAdd(val, Value.one_comptime_int, int_tag_ty) + try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined) else try mod.intValue(int_tag_ty, 0); last_tag_val = val; @@ -34435,7 +34445,28 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {}); } -fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { +/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting +/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). +fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { + var overflow: usize = undefined; + return sema.intAddInner(lhs, rhs, ty, &overflow) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(sema.mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try sema.mod.vectorType(.{ + .len = ty.vectorLen(sema.mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return sema.intAddInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; +} + +fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); @@ -34443,7 +34474,14 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); + const val = sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); } return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), @@ -34455,6 +34493,11 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const mod = sema.mod; + if (scalar_ty.toIntern() != .comptime_int_type) { + const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty); + if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + return res.wrapped_result; + } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; @@ -34467,10 +34510,6 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); - if (scalar_ty.toIntern() != .comptime_int_type) { - const int_info = scalar_ty.intInfo(mod); - result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); - } return mod.intValue_big(scalar_ty, result_bigint.toConst()); } @@ -34485,7 +34524,7 @@ fn numberAddWrapScalar( if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; if (ty.zigTypeTag(mod) == .ComptimeInt) { - return sema.intAdd(lhs, rhs, ty); + return sema.intAdd(lhs, rhs, ty, undefined); } if (ty.isAnyFloat()) { @@ -34496,7 +34535,28 @@ fn numberAddWrapScalar( return overflow_result.wrapped_result; } -fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { +/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting +/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). +fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { + var overflow: usize = undefined; + return sema.intSubInner(lhs, rhs, ty, &overflow) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(sema.mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try sema.mod.vectorType(.{ + .len = ty.vectorLen(sema.mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return sema.intSubInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; +} + +fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); @@ -34504,7 +34564,14 @@ fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try (try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); + const val = sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); } return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), @@ -34516,6 +34583,11 @@ fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const mod = sema.mod; + if (scalar_ty.toIntern() != .comptime_int_type) { + const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty); + if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + return res.wrapped_result; + } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; @@ -34542,7 +34614,7 @@ fn numberSubWrapScalar( if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; if (ty.zigTypeTag(mod) == .ComptimeInt) { - return sema.intSub(lhs, rhs, ty); + return sema.intSub(lhs, rhs, ty, undefined); } if (ty.isAnyFloat()) { diff --git a/src/value.zig b/src/value.zig index 8ab98bc99425..6f603c248e66 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2430,7 +2430,7 @@ pub const Value = struct { if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; if (ty.zigTypeTag(mod) == .ComptimeInt) { - return intMul(lhs, rhs, ty, arena, mod); + return intMul(lhs, rhs, ty, undefined, arena, mod); } if (ty.isAnyFloat()) { @@ -2710,14 +2710,42 @@ pub const Value = struct { return mod.intValue_big(ty, result_bigint.toConst()); } - pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting + /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). + pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { + var overflow: usize = undefined; + return intDivInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return intDivInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; + } + + fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); } return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), @@ -2749,6 +2777,12 @@ pub const Value = struct { var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); + if (ty.toIntern() != .comptime_int_type) { + const info = ty.intInfo(mod); + if (!result_q.toConst().fitsInTwosComp(info.signedness, info.bits)) { + return error.Overflow; + } + } return mod.intValue_big(ty, result_q.toConst()); } @@ -2934,14 +2968,42 @@ pub const Value = struct { } })).toValue(); } - pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting + /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). 
+ pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { + var overflow: usize = undefined; + return intMulInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return intMulInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; + } + + fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); } return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), @@ -2952,6 +3014,11 @@ pub const Value = struct { } pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.toIntern() != .comptime_int_type) { + const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, mod); + if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + return res.wrapped_result; + } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; From dce80f67d4ab9a9387be595c0275853369ffb7e4 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 4 Jun 2023 10:02:16 -0400 Subject: [PATCH 184/205] Sema: fix crashes accessing undefined values --- src/Sema.zig | 54 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index e4b8f84135d6..99b6c1dba667 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2770,8 +2770,8 @@ fn zirStructDecl( // InternPool index. 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -2804,6 +2804,7 @@ fn zirStructDecl( // TODO: figure out InternPool removals for incremental compilation //errdefer mod.intern_pool.remove(struct_ty); + new_decl.ty = Type.type; new_decl.val = struct_ty.toValue(); new_namespace.ty = struct_ty.toType(); @@ -2973,8 +2974,8 @@ fn zirEnumDecl( var done = false; const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "enum", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -3016,6 +3017,7 @@ fn zirEnumDecl( // TODO: figure out InternPool removals for incremental compilation //errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index); + new_decl.ty = Type.type; new_decl.val = incomplete_enum.index.toValue(); new_namespace.ty = incomplete_enum.index.toType(); @@ -3232,8 +3234,8 @@ fn zirUnionDecl( // InternPool index. const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "union", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -3272,6 +3274,7 @@ fn zirUnionDecl( // TODO: figure out InternPool removals for incremental compilation //errdefer mod.intern_pool.remove(union_ty); + new_decl.ty = Type.type; new_decl.val = union_ty.toValue(); new_namespace.ty = union_ty.toType(); @@ -3312,8 +3315,8 @@ fn zirOpaqueDecl( // type gains an InternPool index. const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -3334,6 +3337,7 @@ fn zirOpaqueDecl( // TODO: figure out InternPool removals for incremental compilation //errdefer mod.intern_pool.remove(opaque_ty); + new_decl.ty = Type.type; new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); @@ -19433,8 +19437,8 @@ fn zirReify( // an InternPool index. const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name_strategy, "enum", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -19459,6 +19463,7 @@ fn zirReify( // TODO: figure out InternPool removals for incremental compilation //errdefer ip.remove(incomplete_enum.index); + new_decl.ty = Type.type; new_decl.val = incomplete_enum.index.toValue(); for (0..fields_len) |field_i| { @@ -19527,8 +19532,8 @@ fn zirReify( // after the opaque type gains an InternPool index. 
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -19552,6 +19557,7 @@ fn zirReify( // TODO: figure out InternPool removals for incremental compilation //errdefer ip.remove(opaque_ty); + new_decl.ty = Type.type; new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); @@ -19585,8 +19591,8 @@ fn zirReify( // InternPool index. const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name_strategy, "union", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -19629,6 +19635,7 @@ fn zirReify( // TODO: figure out InternPool removals for incremental compilation //errdefer ip.remove(union_ty); + new_decl.ty = Type.type; new_decl.val = union_ty.toValue(); new_namespace.ty = union_ty.toType(); @@ -19886,8 +19893,8 @@ fn reifyStruct( // InternPool index. const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; @@ -19924,6 +19931,7 @@ fn reifyStruct( // TODO: figure out InternPool removals for incremental compilation //errdefer ip.remove(struct_ty); + new_decl.ty = Type.type; new_decl.val = struct_ty.toValue(); new_namespace.ty = struct_ty.toType(); @@ -33441,8 +33449,8 @@ fn generateUnionTagTypeNumbered( break :name try ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name); errdefer mod.abortAnonDecl(new_decl_index); @@ -33463,6 +33471,7 @@ fn generateUnionTagTypeNumbered( .tag_mode = .explicit, } }); + new_decl.ty = Type.type; new_decl.val = enum_ty.toValue(); try mod.finalizeAnonDecl(new_decl_index); @@ -33482,8 +33491,8 @@ fn generateUnionTagTypeSimple( const new_decl_index = new_decl_index: { const union_obj = maybe_union_obj orelse { break :new_decl_index try mod.createAnonymousDecl(block, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }); }; const src_decl = mod.declPtr(block.src_decl); @@ -33501,8 +33510,8 @@ fn generateUnionTagTypeSimple( break :name try ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ - .ty = Type.type, - .val = undefined, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name); mod.declPtr(new_decl_index).name_fully_qualified = true; break :new_decl_index new_decl_index; @@ -33523,6 +33532,7 @@ fn generateUnionTagTypeSimple( const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; + new_decl.ty = Type.type; new_decl.val = enum_ty.toValue(); try mod.finalizeAnonDecl(new_decl_index); From 44d8cf9331218653c283a930bbc74e6871fe1701 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 4 Jun 2023 10:26:01 -0400 Subject: [PATCH 185/205] wasm: address behavior test regressions --- src/arch/wasm/CodeGen.zig | 28 ++++++++++++++++++++++------ test/behavior/bugs/1381.zig | 1 + 2 files changed, 23 insertions(+), 6 deletions(-) 
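The first hunk below teaches `lowerParentPtr` to handle an element base that is already a `memory_offset` (nested element or field access) by folding the two offsets together, rather than assuming a plain `memory` base. A minimal sketch of the folding idea, using hypothetical reduced types — `PtrValue` and `applyOffset` are illustrative stand-ins, not the backend's real `WValue` API:

    const PtrValue = union(enum) {
        memory: u32, // address of a symbol in linear memory
        memory_offset: struct { pointer: u32, offset: u32 },
    };

    // Fold an extra byte offset into the base pointer instead of
    // hitting `unreachable` when the base already carries an offset.
    fn applyOffset(base: PtrValue, extra: u32) PtrValue {
        return switch (base) {
            .memory => |ptr| .{ .memory_offset = .{
                .pointer = ptr,
                .offset = extra,
            } },
            .memory_offset => |mo| .{ .memory_offset = .{
                .pointer = mo.pointer,
                .offset = mo.offset + extra,
            } },
        };
    }

The same commit also lowers constants of packed unions by recursing on the active field's value, and corrects the operand order passed to `finishAir` in `airCmpxchg` (`expected_value` before `new_value`).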
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 9b7ba19c13a1..4c1d5b4081e0 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2960,10 +2960,21 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { const offset = index * elem_type.abiSize(mod); const array_ptr = try func.lowerParentPtr(elem.base.toValue()); - return WValue{ .memory_offset = .{ - .pointer = array_ptr.memory, - .offset = @intCast(u32, offset), - } }; + return switch (array_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, + }, + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = @intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; }, .field => |field| { const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); @@ -3253,7 +3264,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, else => unreachable, }, - .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .un => |union_obj| { + // in this case we have a packed union which will not be passed by reference. + const field_index = ty.unionTagFieldIndex(union_obj.tag.toValue(), func.bin_file.base.options.module.?).?; + const field_ty = ty.unionFields(mod).values()[field_index].ty; + return func.lowerConstant(union_obj.val.toValue(), field_ty); + }, .memoized_call => unreachable, } } @@ -7173,7 +7189,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :val try WValue.toLocal(.stack, func, result_ty); }; - return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.new_value, extra.expected_value }); + return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.expected_value, extra.new_value }); } fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { diff --git a/test/behavior/bugs/1381.zig b/test/behavior/bugs/1381.zig index 90941de34127..f35c963df3bb 100644 --- a/test/behavior/bugs/1381.zig +++ b/test/behavior/bugs/1381.zig @@ -17,6 +17,7 @@ test "union that needs padding bytes inside an array" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; var as = [_]A{ A{ .B = B{ .D = 1 } }, From f94c66825e5dbaf83cea8cdbf57e57438c938b06 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 10 Jun 2023 17:46:03 -0400 Subject: [PATCH 186/205] langref: fix error set order --- doc/langref.html.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 14dda686a94a..6740d147bd41 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -10176,7 +10176,7 @@ pub fn main() void { {#header_open|Invalid Error Set Cast#}
<p>
At compile-time:
</p>
- {#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{A,C}'#} + {#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{C,A}'#} const Set1 = error{ A, B, From a01bc7776fe3d987e7e9e6346c43480ac260c40e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 10 Jun 2023 17:46:17 -0400 Subject: [PATCH 187/205] llvm: fix name lifetime --- src/codegen/llvm.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 723a55002703..ab4c7581bf87 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1169,13 +1169,14 @@ pub const Object = struct { llvm.DIFlags.NoReturn else 0; + const decl_di_ty = try o.lowerDebugType(decl.ty, .full); const subprogram = dib.createFunction( di_file.?.toScope(), mod.intern_pool.stringToSlice(decl.name), llvm_func.getValueName(), di_file.?, line_number, - try o.lowerDebugType(decl.ty, .full), + decl_di_ty, is_internal_linkage, true, // is definition line_number + func.lbrace_line, // scope line From b9a4eae34900958bad7d477cfde1e7ddb4f8be92 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 11 Jun 2023 00:15:08 -0400 Subject: [PATCH 188/205] llvm: fix more name lifetimes Hopefully this also fixes the non-reproducing CI failures. --- src/codegen/llvm.zig | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ab4c7581bf87..8eb5f8c6833f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2198,6 +2198,7 @@ pub const Object = struct { const field_size = field.ty.abiSize(mod); const field_align = field.normalAlignment(mod); + const field_di_ty = try o.lowerDebugType(field.ty, .full); di_fields.appendAssumeCapacity(dib.createMemberType( fwd_decl.toScope(), mod.intern_pool.stringToSlice(field_name), @@ -2207,7 +2208,7 @@ pub const Object = struct { field_align * 8, // align in bits 0, // offset in bits 0, // flags - try o.lowerDebugType(field.ty, .full), + field_di_ty, )); } @@ -2377,8 +2378,9 @@ pub const Object = struct { const mod = o.module; const decl = mod.declPtr(decl_index); const fields: [0]*llvm.DIType = .{}; + const di_scope = try o.namespaceToDebugScope(decl.src_namespace); return o.di_builder.?.createStructType( - try o.namespaceToDebugScope(decl.src_namespace), + di_scope, mod.intern_pool.stringToSlice(decl.name), // TODO use fully qualified name try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope), decl.src_line + 1, @@ -5940,8 +5942,6 @@ pub const FuncGen = struct { .base_line = self.base_line, }); - const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); - const is_internal_linkage = !mod.decl_exports.contains(decl_index); const fn_ty = try mod.funcType(.{ .param_types = &.{}, @@ -5958,13 +5958,15 @@ pub const FuncGen = struct { .section_is_generic = false, .addrspace_is_generic = false, }); + const fn_di_ty = try self.dg.object.lowerDebugType(fn_ty, .full); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const subprogram = dib.createFunction( di_file.toScope(), mod.intern_pool.stringToSlice(decl.name), fqn, di_file, line_number, - try self.dg.object.lowerDebugType(fn_ty, .full), + fn_di_ty, is_internal_linkage, true, // is definition line_number + func.lbrace_line, // scope line From 0ec012e0c080e1686e270f033ef5aa2d74c7cc9f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 11 Jun 2023 00:41:46 -0400 Subject: [PATCH 189/205] 
TypedValue: fix code formatting --- src/TypedValue.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 723207eeffbe..26bf25bbac47 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -392,7 +392,7 @@ pub fn print( const sent = ty.sentinel(mod) orelse break :blk false; break :blk sent.eql(Value.zero_u8, Type.u8, mod); }; - const str = if (zero_sent) bytes[0..bytes.len - 1] else bytes; + const str = if (zero_sent) bytes[0 .. bytes.len - 1] else bytes; return writer.print("\"{}\"", .{std.zig.fmtEscapes(str)}); }, .elems, .repeated_elem => return printAggregate(ty, val, writer, level, mod), From 7e5dea6366fa194b54cc391ba48c18754df198e7 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 11 Jun 2023 01:02:52 -0400 Subject: [PATCH 190/205] Sema: fix `std.builtin.Type.EnumField.value` when not auto-numbered --- src/Sema.zig | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 99b6c1dba667..5cec8c19f9d8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -16418,9 +16418,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Enum => { // TODO: look into memoizing this result. - const enum_type = ip.indexToKey(ty.toIntern()).enum_type; - - const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive); + const is_exhaustive = Value.makeBool(ip.indexToKey(ty.toIntern()).enum_type.tag_mode != .nonexhaustive); var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); @@ -16438,10 +16436,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t enum_field_ty_decl.val.toType(); }; - const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); + const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.indexToKey(ty.toIntern()).enum_type.names.len); for (enum_field_vals, 0..) 
|*field_val, i| { + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + const value_val = if (enum_type.values.len > 0) + try mod.intern_pool.getCoerced(gpa, enum_type.values[i], .comptime_int_type) + else + try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = @intCast(u64, i) }, + } }); // TODO: write something like getCoercedInts to avoid needing to dupe - const name = try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(ty.toIntern()).enum_type.names[i])); + const name = try sema.arena.dupe(u8, ip.stringToSlice(enum_type.names[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16468,7 +16474,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // value: comptime_int, - (try mod.intValue(Type.comptime_int, i)).toIntern(), + value_val, }; field_val.* = try mod.intern(.{ .aggregate = .{ .ty = enum_field_ty.toIntern(), @@ -16503,7 +16509,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, enum_type.namespace); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.indexToKey(ty.toIntern()).enum_type.namespace); const type_enum_ty = t: { const type_enum_ty_decl_index = (try sema.namespaceLookup( @@ -16520,7 +16526,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // tag_type: type, - enum_type.tag_ty, + ip.indexToKey(ty.toIntern()).enum_type.tag_ty, // fields: []const EnumField, fields_val, // decls: []const Declaration, From 7507a76879bbfb53c170f130ea836b10bc2a42e1 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 11 Jun 2023 03:10:54 -0400 Subject: [PATCH 191/205] link: use `Wasm.string_table` offsets for `Wasm.undefs` keys This avoids having dangling pointers into `InternPool.string_bytes`. --- src/link/Wasm.zig | 60 +++++++++++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 23 deletions(-) diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index d57543542a7c..35972d2fec1b 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -149,7 +149,8 @@ discarded: std.AutoHashMapUnmanaged(SymbolLoc, SymbolLoc) = .{}, /// into the final binary. resolved_symbols: std.AutoArrayHashMapUnmanaged(SymbolLoc, void) = .{}, /// Symbols that remain undefined after symbol resolution. -undefs: std.StringArrayHashMapUnmanaged(SymbolLoc) = .{}, +/// Note: The key represents an offset into the string table, rather than the actual string. +undefs: std.AutoArrayHashMapUnmanaged(u32, SymbolLoc) = .{}, /// Maps a symbol's location to an atom. This can be used to find meta /// data of a symbol, such as its size, or its offset to perform a relocation. /// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped. @@ -514,6 +515,10 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm { /// Leaves index undefined and the default flags (0). 
fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !SymbolLoc { const name_offset = try wasm.string_table.put(wasm.base.allocator, name); + return wasm.createSyntheticSymbolOffset(name_offset, tag); +} + +fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !SymbolLoc { const sym_index = @intCast(u32, wasm.symbols.items.len); const loc: SymbolLoc = .{ .index = sym_index, .file = null }; try wasm.symbols.append(wasm.base.allocator, .{ @@ -691,7 +696,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void { try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, location, {}); if (symbol.isUndefined()) { - try wasm.undefs.putNoClobber(wasm.base.allocator, sym_name, location); + try wasm.undefs.putNoClobber(wasm.base.allocator, sym_name_index, location); } continue; } @@ -801,7 +806,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void { try wasm.resolved_symbols.put(wasm.base.allocator, location, {}); assert(wasm.resolved_symbols.swapRemove(existing_loc)); if (existing_sym.isUndefined()) { - _ = wasm.undefs.swapRemove(sym_name); + _ = wasm.undefs.swapRemove(sym_name_index); } } } @@ -812,15 +817,16 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void { log.debug("Resolving symbols in archives", .{}); var index: u32 = 0; undef_loop: while (index < wasm.undefs.count()) { - const sym_name = wasm.undefs.keys()[index]; + const sym_name_index = wasm.undefs.keys()[index]; for (wasm.archives.items) |archive| { + const sym_name = wasm.string_table.get(sym_name_index); + log.debug("Detected symbol '{s}' in archive '{s}', parsing objects..", .{ sym_name, archive.name }); const offset = archive.toc.get(sym_name) orelse { // symbol does not exist in this archive continue; }; - log.debug("Detected symbol '{s}' in archive '{s}', parsing objects..", .{ sym_name, archive.name }); // Symbol is found in unparsed object file within current archive. // Parse object and and resolve symbols again before we check remaining // undefined symbols. @@ -1191,28 +1197,36 @@ fn validateFeatures( /// if one or multiple undefined references exist. When none exist, the symbol will /// not be created, ensuring we don't unneccesarily emit unreferenced symbols. fn resolveLazySymbols(wasm: *Wasm) !void { - if (wasm.undefs.fetchSwapRemove("__heap_base")) |kv| { - const loc = try wasm.createSyntheticSymbol("__heap_base", .data); - try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations. + if (wasm.string_table.getOffset("__heap_base")) |name_offset| { + if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { + const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data); + try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + _ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations. 
+ } } - if (wasm.undefs.fetchSwapRemove("__heap_end")) |kv| { - const loc = try wasm.createSyntheticSymbol("__heap_end", .data); - try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(loc); + if (wasm.string_table.getOffset("__heap_end")) |name_offset| { + if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { + const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data); + try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + _ = wasm.resolved_symbols.swapRemove(loc); + } } if (!wasm.base.options.shared_memory) { - if (wasm.undefs.fetchSwapRemove("__tls_base")) |kv| { - const loc = try wasm.createSyntheticSymbol("__tls_base", .global); - try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + if (wasm.string_table.getOffset("__tls_base")) |name_offset| { + if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { + const loc = try wasm.createSyntheticSymbolOffset(name_offset, .global); + try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + } } } - if (wasm.undefs.fetchSwapRemove("__zig_errors_len")) |kv| { - const loc = try wasm.createSyntheticSymbol("__zig_errors_len", .data); - try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); - _ = wasm.resolved_symbols.swapRemove(kv.value); + if (wasm.string_table.getOffset("__zig_errors_len")) |name_offset| { + if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| { + const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data); + try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc); + _ = wasm.resolved_symbols.swapRemove(kv.value); + } } } @@ -1611,7 +1625,7 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u3 wasm.symbols.items[sym_index] = symbol; gop.value_ptr.* = .{ .index = sym_index, .file = null }; try wasm.resolved_symbols.put(wasm.base.allocator, gop.value_ptr.*, {}); - try wasm.undefs.putNoClobber(wasm.base.allocator, name, gop.value_ptr.*); + try wasm.undefs.putNoClobber(wasm.base.allocator, name_index, gop.value_ptr.*); return sym_index; } @@ -1769,7 +1783,7 @@ pub fn updateDeclExports( // if the symbol was previously undefined, remove it as an import _ = wasm.imports.remove(sym_loc); - _ = wasm.undefs.swapRemove(mod.intern_pool.stringToSlice(exp.name)); + _ = wasm.undefs.swapRemove(export_name); } } @@ -1885,7 +1899,7 @@ pub fn addOrUpdateImport( const loc: SymbolLoc = .{ .file = null, .index = symbol_index }; global_gop.value_ptr.* = loc; try wasm.resolved_symbols.put(wasm.base.allocator, loc, {}); - try wasm.undefs.putNoClobber(wasm.base.allocator, full_name, loc); + try wasm.undefs.putNoClobber(wasm.base.allocator, decl_name_index, loc); } if (type_index) |ty_index| { From 2afc689060e1d14e039f3c439d42f22ba09768a3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 11 Jun 2023 04:14:17 -0400 Subject: [PATCH 192/205] Sema: fix condition for emitting noreturn safety check --- src/Sema.zig | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 5cec8c19f9d8..21c0402c041c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7170,23 +7170,24 @@ fn analyzeCall( try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src); } return sema.handleTailCall(block, call_src, func_ty, func_inst); - } else if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) { + } + if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) skip_safety: { // Function pointers and extern functions 
aren't guaranteed to // actually be noreturn so we add a safety check for them. - check: { - const func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; + if (try sema.resolveMaybeUndefVal(func)) |func_val| { switch (mod.intern_pool.indexToKey(func_val.toIntern())) { - .func, .extern_func, .ptr => { - _ = try block.addNoOp(.unreach); - return Air.Inst.Ref.unreachable_value; + .func => break :skip_safety, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| if (!mod.declPtr(decl).isExtern(mod)) break :skip_safety, + else => {}, }, - else => break :check, + else => {}, } } - try sema.safetyPanic(block, .noreturn_returned); return Air.Inst.Ref.unreachable_value; - } else if (func_ty_info.return_type == .noreturn_type) { + } + if (func_ty_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } From 63604024f47767b7b0c0deba5c9647cd6c040931 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Jun 2023 13:15:37 +0100 Subject: [PATCH 193/205] stage2: fix InternPool compile errors on 32-bit targets --- src/InternPool.zig | 12 ++++++------ src/Sema.zig | 19 +++++++++++-------- src/TypedValue.zig | 4 ++-- src/arch/wasm/CodeGen.zig | 4 ++-- src/codegen/c.zig | 2 +- src/value.zig | 11 ++++++----- 6 files changed, 28 insertions(+), 24 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index a46f765ad5bb..9ca5a48a55ab 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3824,13 +3824,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(child == .u8_type); if (bytes.len != len) { assert(bytes.len == len_including_sentinel); - assert(bytes[len] == ip.indexToKey(sentinel).int.storage.u64); + assert(bytes[@intCast(usize, len)] == ip.indexToKey(sentinel).int.storage.u64); } }, .elems => |elems| { if (elems.len != len) { assert(elems.len == len_including_sentinel); - assert(elems[len] == sentinel); + assert(elems[@intCast(usize, len)] == sentinel); } }, .repeated_elem => |elem| { @@ -3936,7 +3936,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (child == .u8_type) bytes: { const string_bytes_index = ip.string_bytes.items.len; - try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1); + try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(usize, len_including_sentinel + 1)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); switch (aggregate.storage) { .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), @@ -3953,7 +3953,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .repeated_elem => |elem| switch (ip.indexToKey(elem)) { .undef => break :bytes, .int => |int| @memset( - ip.string_bytes.addManyAsSliceAssumeCapacity(len), + ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(usize, len)), @intCast(u8, int.storage.u64), ), else => unreachable, @@ -3967,7 +3967,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const string = if (has_internal_null) @intToEnum(String, string_bytes_index) else - (try ip.getOrPutTrailingString(gpa, len_including_sentinel)).toString(); + (try ip.getOrPutTrailingString(gpa, @intCast(usize, len_including_sentinel))).toString(); ip.items.appendAssumeCapacity(.{ .tag = .bytes, .data = ip.addExtraAssumeCapacity(Bytes{ @@ -3980,7 +3980,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { try ip.extra.ensureUnusedCapacity( gpa, - 
@typeInfo(Tag.Aggregate).Struct.fields.len + len_including_sentinel, + @typeInfo(Tag.Aggregate).Struct.fields.len + @intCast(usize, len_including_sentinel), ); ip.items.appendAssumeCapacity(.{ .tag = .aggregate, diff --git a/src/Sema.zig b/src/Sema.zig index 21c0402c041c..8d733dfb3c6a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -28186,11 +28186,12 @@ fn beginComptimePtrMutation( const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); return .{ .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, + .byte_offset = elem_abi_size * elem_idx, } }, .ty = parent.ty, }; @@ -28223,7 +28224,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[elem_ptr.index], + &elems[@intCast(usize, elem_ptr.index)], ptr_elem_ty, parent.mut_decl, ); @@ -28254,7 +28255,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[elem_ptr.index], + &elems[@intCast(usize, elem_ptr.index)], ptr_elem_ty, parent.mut_decl, ); @@ -28265,7 +28266,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], + &val_ptr.castTag(.aggregate).?.data[@intCast(usize, elem_ptr.index)], ptr_elem_ty, parent.mut_decl, ), @@ -28291,7 +28292,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[elem_ptr.index], + &elems[@intCast(usize, elem_ptr.index)], ptr_elem_ty, parent.mut_decl, ); @@ -28331,11 +28332,12 @@ fn beginComptimePtrMutation( const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); return ComptimePtrMutationKit{ .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_idx, } }, .ty = parent.ty, }; @@ -28750,9 +28752,10 @@ fn beginComptimePtrLoad( // the pointee array directly from our parent array. 
if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ .ty = try Type.array(sema.arena, N, null, elem_ty, mod), - .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N), + .val = try array_tv.val.sliceArray(mod, sema.arena, elem_idx, elem_idx + N), } else null; break :blk deref; } @@ -28773,7 +28776,7 @@ fn beginComptimePtrLoad( } deref.pointee = TypedValue{ .ty = elem_ty, - .val = try array_tv.val.elemValue(mod, elem_ptr.index), + .val = try array_tv.val.elemValue(mod, @intCast(usize, elem_ptr.index)), }; break :blk deref; }, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 26bf25bbac47..7faff3af019e 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -356,12 +356,12 @@ pub fn print( if (container_ty.isTuple(mod)) { try writer.print("[{d}]", .{field.index}); } - const field_name_ip = container_ty.structFieldName(field.index, mod); + const field_name_ip = container_ty.structFieldName(@intCast(usize, field.index), mod); const field_name = mod.intern_pool.stringToSlice(field_name_ip); try writer.print(".{}", .{std.zig.fmtId(field_name)}); }, .Union => { - const field_name_ip = container_ty.unionFields(mod).keys()[field.index]; + const field_name_ip = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)]; const field_name = mod.intern_pool.stringToSlice(field_name_ip); try writer.print(".{}", .{std.zig.fmtId(field_name)}); }, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 4c1d5b4081e0..877db4b62303 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2982,8 +2982,8 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { const offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout(mod)) { - .Packed => parent_ty.packedStructFieldByteOffset(field.index, mod), - else => parent_ty.structFieldOffset(field.index, mod), + .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod), + else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod), }, .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 7b091d682362..8d2ba2bbb880 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -642,7 +642,7 @@ pub const DeclGen = struct { // Ensure complete type definition is visible before accessing fields. 
_ = try dg.typeToIndex(base_ty, .complete); const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) { - .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(field.index, mod), + .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod), .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => unreachable, .Slice => switch (field.index) { diff --git a/src/value.zig b/src/value.zig index 6f603c248e66..dbf25324e5cc 100644 --- a/src/value.zig +++ b/src/value.zig @@ -395,7 +395,8 @@ pub const Value = struct { } }); }, .aggregate => { - const old_elems = val.castTag(.aggregate).?.data[0..ty.arrayLen(mod)]; + const len = @intCast(usize, ty.arrayLen(mod)); + const old_elems = val.castTag(.aggregate).?.data[0..len]; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); defer mod.gpa.free(new_elems); const ty_key = mod.intern_pool.indexToKey(ty.toIntern()); @@ -642,7 +643,7 @@ pub const Value = struct { const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; const struct_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); - return base_addr + struct_ty.structFieldOffset(field.index, mod); + return base_addr + struct_ty.structFieldOffset(@intCast(usize, field.index), mod); }, else => null, }, @@ -1798,10 +1799,10 @@ pub const Value = struct { .int, .eu_payload => unreachable, .opt_payload => |base| base.toValue().elemValue(mod, index), .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), - .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), + .elem => |elem| elem.base.toValue().elemValue(mod, index + @intCast(usize, elem.index)), .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| { const base_decl = mod.declPtr(decl_index); - const field_val = try base_decl.val.fieldValue(mod, field.index); + const field_val = try base_decl.val.fieldValue(mod, @intCast(usize, field.index)); return field_val.elemValue(mod, index); } else unreachable, }, @@ -1921,7 +1922,7 @@ pub const Value = struct { .comptime_field => |comptime_field| comptime_field.toValue() .sliceArray(mod, arena, start, end), .elem => |elem| elem.base.toValue() - .sliceArray(mod, arena, start + elem.index, end + elem.index), + .sliceArray(mod, arena, start + @intCast(usize, elem.index), end + @intCast(usize, elem.index)), else => unreachable, }, .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ From 5b6906c22eb44b35cdce0368a36b035d6734df04 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Jun 2023 13:17:47 +0100 Subject: [PATCH 194/205] InternPool: fix dbHelper after 4976b58 You must now write '_ = &f' rather than just '_ = f' to ensure a function is compiled into a binary. 
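A minimal standalone sketch of the new requirement (illustrative only; the real `dbHelper` lives in `InternPool.zig` with the `comptime` discard shown in the diff below):

    fn dbHelper() void {
        // Exists only to be invoked from a debugger; nothing in the
        // program references it.
    }

    comptime {
        // `_ = dbHelper;` merely discards the value and no longer forces
        // semantic analysis; taking the address keeps the function in the
        // emitted binary.
        _ = &dbHelper;
    }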
--- src/InternPool.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 9ca5a48a55ab..0f0b40ba4da3 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1498,7 +1498,7 @@ pub const Index = enum(u32) { comptime { if (builtin.mode == .Debug) { - _ = dbHelper; + _ = &dbHelper; } } }; From 54460e39ace2140e6bfcb0bf4ae1709d128f9e8d Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Jun 2023 14:51:08 +0100 Subject: [PATCH 195/205] Autodoc: make it work under InternPool --- src/Autodoc.zig | 45 +++++++++++++++++++++++---------------------- src/Compilation.zig | 1 - src/type.zig | 28 +++++++++++++++------------- 3 files changed, 38 insertions(+), 36 deletions(-) diff --git a/src/Autodoc.zig b/src/Autodoc.zig index c20c5771dd42..1cdb76831130 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -8,6 +8,7 @@ const CompilationModule = @import("Module.zig"); const File = CompilationModule.File; const Module = @import("Package.zig"); const Tokenizer = std.zig.Tokenizer; +const InternPool = @import("InternPool.zig"); const Zir = @import("Zir.zig"); const Ref = Zir.Inst.Ref; const log = std.log.scoped(.autodoc); @@ -106,18 +107,20 @@ pub fn generateZirData(self: *Autodoc) !void { const file = self.comp_module.import_table.get(abs_root_src_path).?; // file is expected to be present in the import table // Append all the types in Zir.Inst.Ref. { - try self.types.append(self.arena, .{ - .ComptimeExpr = .{ .name = "ComptimeExpr" }, - }); - - // this skips Ref.none but it's ok becuse we replaced it with ComptimeExpr - var i: u32 = 1; - while (i <= @enumToInt(Ref.anyerror_void_error_union_type)) : (i += 1) { + comptime std.debug.assert(@enumToInt(InternPool.Index.first_type) == 0); + var i: u32 = 0; + while (i <= @enumToInt(InternPool.Index.last_type)) : (i += 1) { + const ip_index = @intToEnum(InternPool.Index, i); var tmpbuf = std.ArrayList(u8).init(self.arena); - try Ref.typed_value_map[i].val.fmtDebug().format("", .{}, tmpbuf.writer()); + if (ip_index == .generic_poison_type) { + // Not a real type, doesn't have a normal name + try tmpbuf.writer().writeAll("(generic poison)"); + } else { + try ip_index.toType().fmt(self.comp_module).format("", .{}, tmpbuf.writer()); + } try self.types.append( self.arena, - switch (@intToEnum(Ref, i)) { + switch (ip_index) { else => blk: { // TODO: map the remaining refs to a correct type // instead of just assinging "array" to them. @@ -1038,7 +1041,7 @@ fn walkInstruction( .ret_load => { const un_node = data[inst_index].un_node; const res_ptr_ref = un_node.operand; - const res_ptr_inst = @enumToInt(res_ptr_ref) - Ref.typed_value_map.len; + const res_ptr_inst = Zir.refToIndex(res_ptr_ref).?; // TODO: this instruction doesn't let us know trivially if there's // branching involved or not. 
For now here's the strat: // We search backwarts until `ret_ptr` for `store_node`, @@ -2155,11 +2158,10 @@ fn walkInstruction( const lhs_ref = blk: { var lhs_extra = extra; while (true) { - if (@enumToInt(lhs_extra.data.lhs) < Ref.typed_value_map.len) { + const lhs = Zir.refToIndex(lhs_extra.data.lhs) orelse { break :blk lhs_extra.data.lhs; - } + }; - const lhs = @enumToInt(lhs_extra.data.lhs) - Ref.typed_value_map.len; if (tags[lhs] != .field_val and tags[lhs] != .field_ptr and tags[lhs] != .field_type) break :blk lhs_extra.data.lhs; @@ -2186,8 +2188,7 @@ fn walkInstruction( // TODO: double check that we really don't need type info here const wr = blk: { - if (@enumToInt(lhs_ref) >= Ref.typed_value_map.len) { - const lhs_inst = @enumToInt(lhs_ref) - Ref.typed_value_map.len; + if (Zir.refToIndex(lhs_ref)) |lhs_inst| { if (tags[lhs_inst] == .call or tags[lhs_inst] == .field_call) { break :blk DocData.WalkResult{ .expr = .{ @@ -4670,16 +4671,19 @@ fn walkRef( ref: Ref, need_type: bool, // true when the caller needs also a typeRef for the return value ) AutodocErrors!DocData.WalkResult { - const enum_value = @enumToInt(ref); - if (enum_value <= @enumToInt(Ref.anyerror_void_error_union_type)) { + if (ref == .none) { + return .{ .expr = .{ .comptimeExpr = 0 } }; + } else if (@enumToInt(ref) <= @enumToInt(InternPool.Index.last_type)) { // We can just return a type that indexes into `types` with the // enum value because in the beginning we pre-filled `types` with // the types that are listed in `Ref`. return DocData.WalkResult{ .typeRef = .{ .type = @enumToInt(std.builtin.TypeId.Type) }, - .expr = .{ .type = enum_value }, + .expr = .{ .type = @enumToInt(ref) }, }; - } else if (enum_value < Ref.typed_value_map.len) { + } else if (Zir.refToIndex(ref)) |zir_index| { + return self.walkInstruction(file, parent_scope, parent_src, zir_index, need_type); + } else { switch (ref) { else => { panicWithContext( @@ -4772,9 +4776,6 @@ fn walkRef( // } }; // }, } - } else { - const zir_index = enum_value - Ref.typed_value_map.len; - return self.walkInstruction(file, parent_scope, parent_src, zir_index, need_type); } } diff --git a/src/Compilation.zig b/src/Compilation.zig index 64f947c3c338..9397bc93a90f 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2074,7 +2074,6 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void if (!build_options.only_c and !build_options.only_core_functionality) { if (comp.emit_docs) |doc_location| { if (comp.bin_file.options.module) |module| { - if (true) @panic("TODO: get autodoc working again in this branch"); var autodoc = Autodoc.init(module, doc_location); defer autodoc.deinit(); try autodoc.generateZirData(); diff --git a/src/type.zig b/src/type.zig index a6997bc59068..f9065a2e39d7 100644 --- a/src/type.zig +++ b/src/type.zig @@ -315,23 +315,25 @@ pub const Type = struct { .comptime_float, .noreturn, => return writer.writeAll(@tagName(s)), + .null, .undefined, => try writer.print("@TypeOf({s})", .{@tagName(s)}), + .enum_literal => try writer.print("@TypeOf(.{s})", .{@tagName(s)}), - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .generic_poison, - => unreachable, + .atomic_order => try writer.writeAll("std.builtin.AtomicOrder"), + .atomic_rmw_op => try writer.writeAll("std.builtin.AtomicRmwOp"), + .calling_convention => try writer.writeAll("std.builtin.CallingConvention"), + 
.address_space => try writer.writeAll("std.builtin.AddressSpace"), + .float_mode => try writer.writeAll("std.builtin.FloatMode"), + .reduce_op => try writer.writeAll("std.builtin.ReduceOp"), + .call_modifier => try writer.writeAll("std.builtin.CallModifier"), + .prefetch_options => try writer.writeAll("std.builtin.PrefetchOptions"), + .export_options => try writer.writeAll("std.builtin.ExportOptions"), + .extern_options => try writer.writeAll("std.builtin.ExternOptions"), + .type_info => try writer.writeAll("std.builtin.Type"), + + .generic_poison => unreachable, }, .struct_type => |struct_type| { if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { From d37ebfcf231c68a0430840c4fbe649dd0076ae1e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 12 Jun 2023 01:44:12 -0400 Subject: [PATCH 196/205] InternPool: avoid as many slices pointing to `string_bytes` as possible These are frequently invalidated whenever a string is interned, so avoid creating pointers to `string_bytes` wherever possible. This is an attempt to fix random CI failures. --- src/InternPool.zig | 46 ++- src/Module.zig | 122 +++---- src/Sema.zig | 802 +++++++++++++++++++++---------------------- src/TypedValue.zig | 66 ++-- src/codegen/c.zig | 8 +- src/codegen/llvm.zig | 37 +- src/link/C.zig | 2 +- src/link/Coff.zig | 25 +- src/link/Elf.zig | 8 +- src/link/MachO.zig | 12 +- src/link/Plan9.zig | 10 +- src/link/SpirV.zig | 2 +- src/link/Wasm.zig | 14 +- src/print_air.zig | 3 +- src/type.zig | 8 +- src/value.zig | 45 +-- 16 files changed, 572 insertions(+), 638 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 0f0b40ba4da3..d5cf2d3fbfc3 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -156,6 +156,35 @@ pub const NullTerminatedString = enum(u32) { _ = ctx; return @enumToInt(a) < @enumToInt(b); } + + pub fn toUnsigned(self: NullTerminatedString, ip: *const InternPool) ?u32 { + const s = ip.stringToSlice(self); + if (s.len > 1 and s[0] == '0') return null; + if (std.mem.indexOfScalar(u8, s, '_')) |_| return null; + return std.fmt.parseUnsigned(u32, s, 10) catch null; + } + + const FormatData = struct { + string: NullTerminatedString, + ip: *const InternPool, + }; + fn format( + data: FormatData, + comptime specifier: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + const s = data.ip.stringToSlice(data.string); + if (comptime std.mem.eql(u8, specifier, "")) { + try writer.writeAll(s); + } else if (comptime std.mem.eql(u8, specifier, "i")) { + try writer.print("{}", .{std.zig.fmtId(s)}); + } else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'"); + } + + pub fn fmt(self: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) { + return .{ .data = .{ .string = self, .ip = ip } }; + } }; /// An index into `string_bytes` which might be `none`. 
@@ -5252,10 +5281,9 @@ pub fn getOrPutString( gpa: Allocator, s: []const u8, ) Allocator.Error!NullTerminatedString { - const string_bytes = &ip.string_bytes; - try string_bytes.ensureUnusedCapacity(gpa, s.len + 1); - string_bytes.appendSliceAssumeCapacity(s); - string_bytes.appendAssumeCapacity(0); + try ip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1); + ip.string_bytes.appendSliceAssumeCapacity(s); + ip.string_bytes.appendAssumeCapacity(0); return ip.getOrPutTrailingString(gpa, s.len + 1); } @@ -5265,10 +5293,12 @@ pub fn getOrPutStringFmt( comptime format: []const u8, args: anytype, ) Allocator.Error!NullTerminatedString { - const start = ip.string_bytes.items.len; - try ip.string_bytes.writer(gpa).print(format, args); - try ip.string_bytes.append(gpa, 0); - return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); + // ensure that references to string_bytes in args do not get invalidated + const len = std.fmt.count(format, args) + 1; + try ip.string_bytes.ensureUnusedCapacity(gpa, len); + ip.string_bytes.writer(undefined).print(format, args) catch unreachable; + ip.string_bytes.appendAssumeCapacity(0); + return ip.getOrPutTrailingString(gpa, len); } pub fn getOrPutStringOpt( diff --git a/src/Module.zig b/src/Module.zig index cb3e8884e309..61f39a327ad9 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -270,11 +270,7 @@ pub const GlobalEmitH = struct { pub const ErrorInt = u32; pub const Export = struct { - name: InternPool.NullTerminatedString, - linkage: std.builtin.GlobalLinkage, - section: InternPool.OptionalNullTerminatedString, - visibility: std.builtin.SymbolVisibility, - + opts: Options, src: LazySrcLoc, /// The Decl that performs the export. Note that this is *not* the Decl being exported. owner_decl: Decl.Index, @@ -292,6 +288,13 @@ pub const Export = struct { complete, }, + pub const Options = struct { + name: InternPool.NullTerminatedString, + linkage: std.builtin.GlobalLinkage = .Strong, + section: InternPool.OptionalNullTerminatedString = .none, + visibility: std.builtin.SymbolVisibility = .default, + }; + pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc { const src_decl = mod.declPtr(exp.src_decl); return .{ @@ -691,16 +694,15 @@ pub const Decl = struct { } pub fn renderFullyQualifiedName(decl: Decl, mod: *Module, writer: anytype) !void { - const unqualified_name = mod.intern_pool.stringToSlice(decl.name); if (decl.name_fully_qualified) { - return writer.writeAll(unqualified_name); + try writer.print("{}", .{decl.name.fmt(&mod.intern_pool)}); + } else { + try mod.namespacePtr(decl.src_namespace).renderFullyQualifiedName(mod, decl.name, writer); } - return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedName(mod, unqualified_name, writer); } pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void { - const unqualified_name = mod.intern_pool.stringToSlice(decl.name); - return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, unqualified_name, writer); + return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, decl.name, writer); } pub fn getFullyQualifiedName(decl: Decl, mod: *Module) !InternPool.NullTerminatedString { @@ -712,8 +714,7 @@ pub const Decl = struct { var ns: Namespace.Index = decl.src_namespace; while (true) { const namespace = mod.namespacePtr(ns); - const ns_decl_index = namespace.getDeclIndex(mod); - const ns_decl = mod.declPtr(ns_decl_index); + const ns_decl = mod.declPtr(namespace.getDeclIndex(mod)); count += ip.stringToSlice(ns_decl.name).len + 1; 
ns = namespace.parent.unwrap() orelse { count += namespace.file_scope.sub_file_path.len; @@ -1722,44 +1723,34 @@ pub const Namespace = struct { pub fn renderFullyQualifiedName( ns: Namespace, mod: *Module, - name: []const u8, + name: InternPool.NullTerminatedString, writer: anytype, ) @TypeOf(writer).Error!void { if (ns.parent.unwrap()) |parent| { - const decl_index = ns.getDeclIndex(mod); - const decl = mod.declPtr(decl_index); - const decl_name = mod.intern_pool.stringToSlice(decl.name); - try mod.namespacePtr(parent).renderFullyQualifiedName(mod, decl_name, writer); + const decl = mod.declPtr(ns.getDeclIndex(mod)); + try mod.namespacePtr(parent).renderFullyQualifiedName(mod, decl.name, writer); } else { try ns.file_scope.renderFullyQualifiedName(writer); } - if (name.len != 0) { - try writer.writeAll("."); - try writer.writeAll(name); - } + if (name != .empty) try writer.print(".{}", .{name.fmt(&mod.intern_pool)}); } /// This renders e.g. "std/fs.zig:Dir.OpenOptions" pub fn renderFullyQualifiedDebugName( ns: Namespace, mod: *Module, - name: []const u8, + name: InternPool.NullTerminatedString, writer: anytype, ) @TypeOf(writer).Error!void { - var separator_char: u8 = '.'; - if (ns.parent.unwrap()) |parent| { - const decl_index = ns.getDeclIndex(mod); - const decl = mod.declPtr(decl_index); - const decl_name = mod.intern_pool.stringToSlice(decl.name); - try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, decl_name, writer); - } else { + const separator_char: u8 = if (ns.parent.unwrap()) |parent| sep: { + const decl = mod.declPtr(ns.getDeclIndex(mod)); + try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, decl.name, writer); + break :sep '.'; + } else sep: { try ns.file_scope.renderFullyQualifiedDebugName(writer); - separator_char = ':'; - } - if (name.len != 0) { - try writer.writeByte(separator_char); - try writer.writeAll(name); - } + break :sep ':'; + }; + if (name != .empty) try writer.print("{c}{}", .{ separator_char, name.fmt(&mod.intern_pool) }); } pub fn getDeclIndex(ns: Namespace, mod: *Module) Decl.Index { @@ -4185,10 +4176,10 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void defer liveness.deinit(gpa); if (dump_air) { - const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); - std.debug.print("# Begin Function AIR: {s}:\n", .{fqn}); + const fqn = try decl.getFullyQualifiedName(mod); + std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(&mod.intern_pool)}); @import("print_air.zig").dump(mod, air, liveness); - std.debug.print("# End Function AIR: {s}\n\n", .{fqn}); + std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(&mod.intern_pool)}); } if (std.debug.runtime_safety) { @@ -4620,10 +4611,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return sema.fail(&block_scope, export_src, "export of inline function", .{}); } // The scope needs to have the decl in it. - const options: std.builtin.ExportOptions = .{ - .name = mod.intern_pool.stringToSlice(decl.name), - }; - try sema.analyzeExport(&block_scope, export_src, options, decl_index); + try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } return type_changed or is_inline != prev_is_inline; } @@ -4720,10 +4708,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; // The scope needs to have the decl in it. 
- const options: std.builtin.ExportOptions = .{ - .name = mod.intern_pool.stringToSlice(decl.name), - }; - try sema.analyzeExport(&block_scope, export_src, options, decl_index); + try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } return type_changed; @@ -5222,12 +5207,9 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err .parent_decl_node = decl.src_node, .lazy = .{ .token_offset = 1 }, }; - const msg = try ErrorMsg.create( - gpa, - src_loc, - "duplicate test name: {s}", - .{ip.stringToSlice(decl_name)}, - ); + const msg = try ErrorMsg.create(gpa, src_loc, "duplicate test name: {}", .{ + decl_name.fmt(&mod.intern_pool), + }); errdefer msg.destroy(gpa); try mod.failed_decls.putNoClobber(gpa, decl_index, msg); const other_src_loc = SrcLoc{ @@ -5417,16 +5399,16 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void } } if (mod.comp.bin_file.cast(link.File.Elf)) |elf| { - elf.deleteDeclExport(decl_index, exp.name); + elf.deleteDeclExport(decl_index, exp.opts.name); } if (mod.comp.bin_file.cast(link.File.MachO)) |macho| { - try macho.deleteDeclExport(decl_index, exp.name); + try macho.deleteDeclExport(decl_index, exp.opts.name); } if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| { wasm.deleteDeclExport(decl_index); } if (mod.comp.bin_file.cast(link.File.Coff)) |coff| { - coff.deleteDeclExport(decl_index, exp.name); + coff.deleteDeclExport(decl_index, exp.opts.name); } if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| { failed_kv.value.destroy(mod.gpa); @@ -5810,12 +5792,9 @@ pub fn createAnonymousDeclFromDecl( ) !Decl.Index { const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope); errdefer mod.destroyDecl(new_decl_index); - const ip = &mod.intern_pool; - // This protects the getOrPutStringFmt from reallocating src decl name while reading it. 
- try ip.string_bytes.ensureUnusedCapacity(mod.gpa, ip.stringToSlice(src_decl.name).len + 20); - const name = ip.getOrPutStringFmt(mod.gpa, "{s}__anon_{d}", .{ - ip.stringToSlice(src_decl.name), @enumToInt(new_decl_index), - }) catch unreachable; + const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{ + src_decl.name.fmt(&mod.intern_pool), @enumToInt(new_decl_index), + }); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); return new_decl_index; } @@ -6301,13 +6280,13 @@ pub fn processExports(mod: *Module) !void { const exported_decl = entry.key_ptr.*; const exports = entry.value_ptr.items; for (exports) |new_export| { - const gop = try symbol_exports.getOrPut(gpa, new_export.name); + const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name); if (gop.found_existing) { new_export.status = .failed_retryable; try mod.failed_exports.ensureUnusedCapacity(gpa, 1); const src_loc = new_export.getSrcLoc(mod); - const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{ - mod.intern_pool.stringToSlice(new_export.name), + const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {}", .{ + new_export.opts.name.fmt(&mod.intern_pool), }); errdefer msg.destroy(gpa); const other_export = gop.value_ptr.*; @@ -6752,18 +6731,9 @@ pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Alloca } })).toType(); } -pub fn singleErrorSetType(mod: *Module, name: []const u8) Allocator.Error!Type { - const gpa = mod.gpa; - const ip = &mod.intern_pool; - return singleErrorSetTypeNts(mod, try ip.getOrPutString(gpa, name)); -} - -pub fn singleErrorSetTypeNts(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { - const gpa = mod.gpa; - const ip = &mod.intern_pool; - const names = [1]InternPool.NullTerminatedString{name}; - const i = try ip.get(gpa, .{ .error_set_type = .{ .names = &names } }); - return i.toType(); +pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { + const names: *const [1]InternPool.NullTerminatedString = &name; + return (try mod.intern_pool.get(mod.gpa, .{ .error_set_type = .{ .names = names } })).toType(); } /// Sorts `names` in place. 
diff --git a/src/Sema.zig b/src/Sema.zig index 8d733dfb3c6a..cc79578931b3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -309,17 +309,17 @@ pub const Block = struct { src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 }; break :blk src_loc; } else blk: { - const src_decl = sema.mod.declPtr(rt.block.src_decl); + const src_decl = mod.declPtr(rt.block.src_decl); break :blk rt.func_src.toSrcLoc(src_decl, mod); }; if (rt.return_ty.isGenericPoison()) { - return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); + return mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); } - try sema.mod.errNoteNonLazy( + try mod.errNoteNonLazy( src_loc, parent, prefix ++ "the function returns a comptime-only type '{}'", - .{rt.return_ty.fmt(sema.mod)}, + .{rt.return_ty.fmt(mod)}, ); try sema.explainWhyTypeIsComptime(parent, src_loc, rt.return_ty); }, @@ -2825,7 +2825,6 @@ fn createAnonymousDeclTypeNamed( ) !Decl.Index { const mod = sema.mod; const gpa = sema.gpa; - const ip = &mod.intern_pool; const namespace = block.namespace; const src_scope = block.wip_capture_scope; const src_decl = mod.declPtr(block.src_decl); @@ -2842,12 +2841,8 @@ fn createAnonymousDeclTypeNamed( // This name is also used as the key in the parent namespace so it cannot be // renamed. - // This ensureUnusedCapacity protects against the src_decl slice from being - // reallocated during the call to `getOrPutStringFmt`. - try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(src_decl.name).len + - anon_prefix.len + 20); - const name = ip.getOrPutStringFmt(gpa, "{s}__{s}_{d}", .{ - ip.stringToSlice(src_decl.name), anon_prefix, @enumToInt(new_decl_index), + const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ + src_decl.name.fmt(&mod.intern_pool), anon_prefix, @enumToInt(new_decl_index), }) catch unreachable; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; @@ -2863,8 +2858,9 @@ fn createAnonymousDeclTypeNamed( var buf = std.ArrayList(u8).init(gpa); defer buf.deinit(); - try buf.appendSlice(ip.stringToSlice(mod.declPtr(block.src_decl).name)); - try buf.appendSlice("("); + + const writer = buf.writer(); + try writer.print("{}(", .{mod.declPtr(block.src_decl).name.fmt(&mod.intern_pool)}); var arg_i: usize = 0; for (fn_info.param_body) |zir_inst| switch (zir_tags[zir_inst]) { @@ -2878,8 +2874,8 @@ fn createAnonymousDeclTypeNamed( const arg_val = sema.resolveConstMaybeUndefVal(block, .unneeded, arg, "") catch return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null); - if (arg_i != 0) try buf.appendSlice(","); - try buf.writer().print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)}); + if (arg_i != 0) try writer.writeByte(','); + try writer.print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)}); arg_i += 1; continue; @@ -2887,8 +2883,8 @@ fn createAnonymousDeclTypeNamed( else => continue, }; - try buf.appendSlice(")"); - const name = try ip.getOrPutString(gpa, buf.items); + try writer.writeByte(')'); + const name = try mod.intern_pool.getOrPutString(gpa, buf.items); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2901,17 +2897,9 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - // This ensureUnusedCapacity protects against 
the src_decl - // slice from being reallocated during the call to - // `getOrPutStringFmt`. - const zir_str = zir_data[i].str_op.getStr(sema.code); - try ip.string_bytes.ensureUnusedCapacity( - gpa, - ip.stringToSlice(src_decl.name).len + zir_str.len + 10, - ); - const name = ip.getOrPutStringFmt(gpa, "{s}.{s}", .{ - ip.stringToSlice(src_decl.name), zir_str, - }) catch unreachable; + const name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}.{s}", .{ + src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code), + }); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; @@ -4538,8 +4526,8 @@ fn validateStructInit( continue; } const field_name = struct_ty.structFieldName(i, mod); - const template = "missing struct field: {s}"; - const args = .{ip.stringToSlice(field_name)}; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4560,12 +4548,12 @@ fn validateStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); + const fqn = try struct_obj.getFullyQualifiedName(mod); try mod.errNoteNonLazy( struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -4682,8 +4670,8 @@ fn validateStructInit( continue; } const field_name = struct_ty.structFieldName(i, mod); - const template = "missing struct field: {s}"; - const args = .{ip.stringToSlice(field_name)}; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4696,12 +4684,12 @@ fn validateStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); - try sema.mod.errNoteNonLazy( + const fqn = try struct_obj.getFullyQualifiedName(mod); + try mod.errNoteNonLazy( struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -4942,11 +4930,11 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag(mod) != .Pointer) { - return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)}); + return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(mod)}); } else switch (operand_ty.ptrSize(mod)) { .One, .C => {}, - .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(sema.mod)}), - .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(sema.mod)}), + .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(mod)}), + .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(mod)}), } if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) { @@ -4965,11 +4953,11 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr block, src, "values of type '{}' must be comptime-known, but operand value is runtime-known", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ); errdefer 
msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), elem_ty); break :msg msg; }; @@ -4982,7 +4970,7 @@ fn failWithBadMemberAccess( block: *Block, agg_ty: Type, field_src: LazySrcLoc, - field_name_nts: InternPool.NullTerminatedString, + field_name: InternPool.NullTerminatedString, ) CompileError { const mod = sema.mod; const kw_name = switch (agg_ty.zigTypeTag(mod)) { @@ -4992,15 +4980,14 @@ fn failWithBadMemberAccess( .Enum => "enum", else => unreachable, }; - const field_name = mod.intern_pool.stringToSlice(field_name_nts); - if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (sema.mod.declIsRoot(some)) { - return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{ - agg_ty.fmt(sema.mod), field_name, + if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (mod.declIsRoot(some)) { + return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{ + agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool), }); }; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{ - kw_name, agg_ty.fmt(sema.mod), field_name, + const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{}'", .{ + kw_name, agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, agg_ty); @@ -5018,16 +5005,15 @@ fn failWithBadStructFieldAccess( ) CompileError { const mod = sema.mod; const gpa = sema.gpa; - const ip = &mod.intern_pool; - const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); + const fqn = try struct_obj.getFullyQualifiedName(mod); const msg = msg: { const msg = try sema.errMsg( block, field_src, - "no field named '{s}' in struct '{s}'", - .{ ip.stringToSlice(field_name), fqn }, + "no field named '{}' in struct '{}'", + .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) }, ); errdefer msg.destroy(gpa); try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{}); @@ -5045,16 +5031,15 @@ fn failWithBadUnionFieldAccess( ) CompileError { const mod = sema.mod; const gpa = sema.gpa; - const ip = &mod.intern_pool; - const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); + const fqn = try union_obj.getFullyQualifiedName(mod); const msg = msg: { const msg = try sema.errMsg( block, field_src, - "no field named '{s}' in union '{s}'", - .{ ip.stringToSlice(field_name), fqn }, + "no field named '{}' in union '{}'", + .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) }, ); errdefer msg.destroy(gpa); try mod.errNoteNonLazy(union_obj.srcLoc(mod), msg, "union declared here", .{}); @@ -5334,7 +5319,9 @@ fn zirCompileLog( sema: *Sema, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - var managed = sema.mod.compile_log_text.toManaged(sema.gpa); + const mod = sema.mod; + + var managed = mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -5349,16 +5336,16 @@ fn zirCompileLog( const arg_ty = sema.typeOf(arg); if (try sema.resolveMaybeUndefLazyVal(arg)) |val| { try writer.print("@as({}, {})", .{ - arg_ty.fmt(sema.mod), val.fmtValue(arg_ty, sema.mod), + arg_ty.fmt(mod), val.fmtValue(arg_ty, mod), }); } else { - try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(sema.mod)}); + try writer.print("@as({}, [runtime value])", 
.{arg_ty.fmt(mod)}); } } try writer.print("\n", .{}); const decl_index = if (sema.func) |some| some.owner_decl else sema.owner_decl_index; - const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, decl_index); + const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = src_node; } @@ -5509,7 +5496,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr if (!mod.comp.bin_file.options.link_libc) try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{}); - const gop = try sema.mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index); + const gop = try mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index); if (!gop.found_existing) { var errs = try std.ArrayListUnmanaged(Module.CImportError).initCapacity(sema.gpa, c_import_res.errors.len); errdefer { @@ -5869,13 +5856,13 @@ pub fn analyzeExport( sema: *Sema, block: *Block, src: LazySrcLoc, - borrowed_options: std.builtin.ExportOptions, + options: Module.Export.Options, exported_decl_index: Decl.Index, ) !void { const Export = Module.Export; const mod = sema.mod; - if (borrowed_options.linkage == .Internal) { + if (options.linkage == .Internal) { return; } @@ -5884,10 +5871,10 @@ pub fn analyzeExport( if (!try sema.validateExternType(exported_decl.ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), exported_decl.ty, .other); try sema.addDeclaredHereNote(msg, exported_decl.ty); @@ -5913,14 +5900,8 @@ pub fn analyzeExport( const new_export = try gpa.create(Export); errdefer gpa.destroy(new_export); - const symbol_name = try mod.intern_pool.getOrPutString(gpa, borrowed_options.name); - const section = try mod.intern_pool.getOrPutStringOpt(gpa, borrowed_options.section); - new_export.* = .{ - .name = symbol_name, - .linkage = borrowed_options.linkage, - .section = section, - .visibility = borrowed_options.visibility, + .opts = options, .src = src, .owner_decl = sema.owner_decl_index, .src_decl = block.src_decl, @@ -6198,7 +6179,7 @@ fn lookupInNamespace( const namespace = mod.namespacePtr(namespace_index); const namespace_decl_index = namespace.getDeclIndex(mod); - const namespace_decl = sema.mod.declPtr(namespace_decl_index); + const namespace_decl = mod.declPtr(namespace_decl_index); if (namespace_decl.analysis == .file_failure) { try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index); return error.AnalysisFail; @@ -6531,7 +6512,7 @@ fn zirCall( // AstGen ensures that a call instruction is always preceded by a dbg_stmt instruction. 
const call_dbg_node = inst - 1; - if (sema.mod.backendSupportsFeature(.error_return_trace) and sema.mod.comp.bin_file.options.error_return_tracing and + if (mod.backendSupportsFeature(.error_return_trace) and mod.comp.bin_file.options.error_return_tracing and !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace)) { const call_inst: Air.Inst.Ref = if (modifier == .always_tail) undefined else b: { @@ -6599,7 +6580,7 @@ fn checkCallArgumentCount( { const msg = msg: { const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{ - callee_ty.fmt(sema.mod), + callee_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, func_src, msg, "consider using '.?', 'orelse' or 'if'", .{}); @@ -6610,7 +6591,7 @@ fn checkCallArgumentCount( }, else => {}, } - return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)}); + return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(mod)}); }; const func_ty_info = mod.typeToFunc(func_ty).?; @@ -6640,7 +6621,7 @@ fn checkCallArgumentCount( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6666,7 +6647,7 @@ fn callBuiltin( }, else => {}, } - std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(sema.mod)}); + std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(mod)}); }; const func_ty_info = mod.typeToFunc(func_ty).?; @@ -6942,7 +6923,7 @@ fn analyzeCall( ) catch |err| switch (err) { error.NeededSourceLocation => { _ = sema.inst_map.remove(inst); - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); try sema.analyzeInlineCallArg( block, &child_block, @@ -7111,7 +7092,7 @@ fn analyzeCall( opts, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.analyzeCallArg( block, mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), @@ -7126,7 +7107,7 @@ fn analyzeCall( } else { args[i] = sema.coerceVarArgParam(block, uncasted_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.coerceVarArgParam( block, uncasted_arg, @@ -7148,7 +7129,7 @@ fn analyzeCall( if (try sema.resolveMaybeUndefVal(func)) |func_val| { if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| { - try sema.mod.ensureFuncBodyAnalysisQueued(func_index); + try mod.ensureFuncBodyAnalysisQueued(func_index); } } @@ -7201,17 +7182,18 @@ fn analyzeCall( } fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref { - const target = sema.mod.getTarget(); - const backend = sema.mod.comp.getZigBackend(); + const mod = sema.mod; + const target = mod.getTarget(); + const backend = mod.comp.getZigBackend(); if (!target_util.supportsTailCall(target, backend)) { return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{ @tagName(backend), @tagName(target.cpu.arch), }); } - const func_decl = 
sema.mod.declPtr(sema.owner_func.?.owner_decl); - if (!func_ty.eql(func_decl.ty, sema.mod)) { + const func_decl = mod.declPtr(sema.owner_func.?.owner_decl); + if (!func_ty.eql(func_decl.ty, mod)) { return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{ - func_ty.fmt(sema.mod), func_decl.ty.fmt(sema.mod), + func_ty.fmt(mod), func_decl.ty.fmt(mod), }); } _ = try block.addUnOp(.ret, result); @@ -7404,10 +7386,9 @@ fn instantiateGenericCall( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; - const ip = &mod.intern_pool; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn_index = switch (ip.indexToKey(func_val.toIntern())) { + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .func => |function| function.index, .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?, else => unreachable, @@ -7467,7 +7448,7 @@ fn instantiateGenericCall( if (is_comptime) { const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[arg_i]) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); _ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[arg_i]); unreachable; @@ -7491,7 +7472,7 @@ fn instantiateGenericCall( }; const casted_arg = sema.coerce(block, final_arg_ty.toType(), uncasted_args[arg_i], .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); _ = try sema.coerce(block, final_arg_ty.toType(), uncasted_args[arg_i], arg_src); unreachable; @@ -7500,7 +7481,7 @@ fn instantiateGenericCall( }; const casted_arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, casted_arg) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); _ = try sema.analyzeGenericCallArgVal(block, arg_src, casted_arg); unreachable; @@ -7540,12 +7521,9 @@ fn instantiateGenericCall( const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); // TODO better names for generic function instantiations - // The ensureUnusedCapacity here protects against fn_owner_decl.name slice being - // reallocated during getOrPutStringFmt. 
- try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(fn_owner_decl.name).len + 20); - const decl_name = ip.getOrPutStringFmt(gpa, "{s}__anon_{d}", .{ - ip.stringToSlice(fn_owner_decl.name), @enumToInt(new_decl_index), - }) catch unreachable; + const decl_name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + fn_owner_decl.name.fmt(&mod.intern_pool), @enumToInt(new_decl_index), + }); new_decl.name = decl_name; new_decl.src_line = fn_owner_decl.src_line; new_decl.is_pub = fn_owner_decl.is_pub; @@ -7634,7 +7612,7 @@ fn instantiateGenericCall( &runtime_i, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.analyzeGenericCallArg( block, mod.argSrc(call_src.node_offset.x, decl, total_i, bound_arg_src), @@ -7660,7 +7638,7 @@ fn instantiateGenericCall( sema.owner_func.?.calls_or_awaits_errorable_fn = true; } - try sema.mod.ensureFuncBodyAnalysisQueued(callee_index); + try mod.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); @@ -7788,7 +7766,7 @@ fn resolveGenericInstantiationType( if (try sema.typeRequiresComptime(arg_ty)) { const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known"); unreachable; @@ -7981,9 +7959,9 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; const child_type = try sema.resolveType(block, operand_src, inst_data.operand); if (child_type.zigTypeTag(mod) == .Opaque) { - return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); + return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(mod)}); } else if (child_type.zigTypeTag(mod) == .Null) { - return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); + return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(mod)}); } const opt_type = try Type.optional(sema.arena, child_type, mod); @@ -8059,7 +8037,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void { const mod = sema.mod; if (elem_type.zigTypeTag(mod) == .Opaque) { - return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(sema.mod)}); + return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(mod)}); } else if (elem_type.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{}); } @@ -8095,7 +8073,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr if (error_set.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{ - error_set.fmt(sema.mod), + error_set.fmt(mod), }); } try sema.validateErrorUnionPayloadType(block, payload, rhs_src); @@ -8107,11 +8085,11 @@ fn 
validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, p const mod = sema.mod; if (payload_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{ - payload_ty.fmt(sema.mod), + payload_ty.fmt(mod), }); } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) { return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{ - payload_ty.fmt(sema.mod), + payload_ty.fmt(mod), }); } } @@ -8123,7 +8101,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); _ = try mod.getErrorValue(name); // Create an error set type with only this error value, and return the value. - const error_set_type = try mod.singleErrorSetTypeNts(name); + const error_set_type = try mod.singleErrorSetType(name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = name, @@ -8231,9 +8209,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); if (lhs_ty.zigTypeTag(mod) != .ErrorSet) - return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(sema.mod)}); + return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(mod)}); if (rhs_ty.zigTypeTag(mod) != .ErrorSet) - return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); + return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(mod)}); // Anything merged with anyerror is anyerror. 
if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) { @@ -8296,7 +8274,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => { return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); }, }; @@ -8328,7 +8306,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag(mod) != .Enum) { - return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(sema.mod)}); + return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(mod)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); @@ -8343,7 +8321,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, src, "int value '{}' out of range of non-exhaustive enum '{}'", - .{ int_val.fmtValue(sema.typeOf(operand), sema.mod), dest_ty.fmt(sema.mod) }, + .{ int_val.fmtValue(sema.typeOf(operand), mod), dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -8360,7 +8338,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, src, "enum '{}' has no tag with value '{}'", - .{ dest_ty.fmt(sema.mod), int_val.fmtValue(sema.typeOf(operand), sema.mod) }, + .{ dest_ty.fmt(mod), int_val.fmtValue(sema.typeOf(operand), mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -8383,7 +8361,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.requireRuntimeBlock(block, src, operand_src); const result = try block.addTyOp(.intcast, dest_ty, operand); if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and - sema.mod.backendSupportsFeature(.is_named_enum_value)) + mod.backendSupportsFeature(.is_named_enum_value)) { const ok = try block.addUnOp(.is_named_enum_value, result); try sema.addSafetyCheck(block, ok, .invalid_enum_value); @@ -8422,11 +8400,11 @@ fn analyzeOptionalPayloadPtr( const opt_type = optional_ptr_ty.childType(mod); if (opt_type.zigTypeTag(mod) != .Optional) { - return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)}); + return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(mod)}); } const child_type = opt_type.optionalChild(mod); - const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ + const child_pointer = try Type.ptr(sema.arena, mod, .{ .pointee_type = child_type, .mutable = !optional_ptr_ty.isConstPtr(mod), .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), @@ -8493,7 +8471,7 @@ fn zirOptionalPayload( // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; const ptr_info = operand_ty.ptrInfo(mod); - break :t try Type.ptr(sema.arena, sema.mod, .{ + break :t try Type.ptr(sema.arena, mod, .{ .pointee_type = ptr_info.pointee_type, .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", @@ -8538,7 +8516,7 @@ fn zirErrUnionPayload( const err_union_ty = sema.typeOf(operand); if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false); @@ -8556,8 +8534,8 @@ fn analyzeErrUnionPayload( const mod = sema.mod; const 
payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - if (val.getError(mod)) |name| { - return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); + if (val.getErrorName(mod).unwrap()) |name| { + return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)}); } return sema.addConstant( payload_ty, @@ -8607,13 +8585,13 @@ fn analyzeErrUnionPayloadPtr( if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.childType(mod).fmt(sema.mod), + operand_ty.childType(mod).fmt(mod), }); } const err_union_ty = operand_ty.childType(mod); const payload_ty = err_union_ty.errorUnionPayload(mod); - const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ + const operand_pointer_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(mod), .@"addrspace" = operand_ty.ptrAddressSpace(mod), @@ -8634,8 +8612,8 @@ fn analyzeErrUnionPayloadPtr( } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { - if (val.getError(mod)) |name| { - return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); + if (val.getErrorName(mod).unwrap()) |name| { + return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)}); } return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ .ty = operand_pointer_ty.toIntern(), @@ -8676,7 +8654,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); } @@ -8707,7 +8685,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.childType(mod).fmt(sema.mod), + operand_ty.childType(mod).fmt(mod), }); } @@ -8715,7 +8693,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { - assert(val.getError(mod) != null); + assert(val.getErrorName(mod) != .none); return sema.addConstant(result_ty, val); } } @@ -8968,7 +8946,7 @@ fn funcCommon( }; errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const fn_ty: Type = fn_ty: { // In the case of generic calling convention, or generic alignment, we use // default values which are only meaningful for the generic function, *not* @@ -8995,7 +8973,7 @@ fn funcCommon( is_noalias, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); try sema.analyzeParameter( block, Module.paramSrc(src_node_offset, mod, decl, i), @@ -9040,7 +9018,7 @@ fn funcCommon( const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{ - opaque_str, return_type.fmt(sema.mod), + opaque_str, return_type.fmt(mod), }); errdefer msg.destroy(gpa); @@ -9054,11 
+9032,11 @@ fn funcCommon( { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ - return_type.fmt(sema.mod), @tagName(cc_resolved), + return_type.fmt(mod), @tagName(cc_resolved), }); errdefer msg.destroy(gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty); try sema.addDeclaredHereNote(msg, return_type); @@ -9077,7 +9055,7 @@ fn funcCommon( block, ret_ty_src, "function with comptime-only return type '{}' requires all parameters to be comptime", - .{return_type.fmt(sema.mod)}, + .{return_type.fmt(mod)}, ); try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type); @@ -9102,7 +9080,7 @@ fn funcCommon( return sema.failWithOwnedErrorMsg(msg); } - const arch = sema.mod.getTarget().cpu.arch; + const arch = mod.getTarget().cpu.arch; if (switch (cc_resolved) { .Unspecified, .C, .Naked, .Async, .Inline => null, .Interrupt => switch (arch) { @@ -9542,7 +9520,7 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ptr = try sema.resolveInst(inst_data.operand); const ptr_ty = sema.typeOf(ptr); if (!ptr_ty.isPtrAtRuntime(mod)) { - return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); + return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { return sema.addConstant( @@ -9797,14 +9775,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}), + => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(sema.mod)}), + .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9815,11 +9793,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Pointer => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (operand_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(sema.mod)}), - .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(sema.mod)}), + .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(mod)}), + .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9834,7 +9812,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else => unreachable, }; 
return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{ - dest_ty.fmt(sema.mod), container, + dest_ty.fmt(mod), container, }); }, @@ -9861,14 +9839,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (dest_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), + .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(mod)}), else => {}, } @@ -9878,11 +9856,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, .Pointer => { const msg = msg: { - const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (dest_ty.zigTypeTag(mod)) { - .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), - .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(sema.mod)}), + .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(mod)}), + .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(mod)}), else => {}, } @@ -9897,7 +9875,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else => unreachable, }; return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{ - operand_ty.fmt(sema.mod), container, + operand_ty.fmt(mod), container, }); }, @@ -9924,7 +9902,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) { .ComptimeFloat => true, .Float => false, @@ -9932,7 +9910,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, dest_ty_src, "expected float type, found '{}'", - .{dest_ty.fmt(sema.mod)}, + .{dest_ty.fmt(mod)}, ), }; @@ -9943,7 +9921,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, operand_src, "expected float type, found '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ), } @@ -10002,7 +9980,7 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node }; const msg = msg: { const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{ - indexable_ty.fmt(sema.mod), + indexable_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if 
(indexable_ty.zigTypeTag(mod) == .Array) { @@ -10143,12 +10121,12 @@ fn zirSwitchCapture( const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; const resolved_item_val = try sema.resolveLazyValue(item_val); if (operand_ty.zigTypeTag(mod) == .Union) { - const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(resolved_item_val, sema.mod).?); + const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(resolved_item_val, mod).?); const union_obj = mod.typeToUnion(operand_ty).?; const field_ty = union_obj.fields.values()[field_index].ty; if (try sema.resolveDefinedValue(block, sema.src, operand_ptr)) |union_val| { if (is_ref) { - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, .mutable = operand_ptr_ty.ptrIsMutable(mod), .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), @@ -10168,7 +10146,7 @@ fn zirSwitchCapture( ); } if (is_ref) { - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, .mutable = operand_ptr_ty.ptrIsMutable(mod), .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), @@ -10221,7 +10199,7 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const first_item_val = sema.resolveConstValue(block, .unneeded, first_item, "") catch unreachable; - const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, sema.mod).?); + const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, mod).?); const first_field = union_obj.fields.values()[first_field_index]; for (items[1..], 0..) |item, i| { @@ -10229,22 +10207,22 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const field_index = operand_ty.unionTagFieldIndex(item_val, sema.mod).?; + const field_index = operand_ty.unionTagFieldIndex(item_val, mod).?; const field = union_obj.fields.values()[field_index]; - if (!field.ty.eql(first_field.ty, sema.mod)) { + if (!field.ty.eql(first_field.ty, mod)) { const msg = msg: { const raw_capture_src = Module.SwitchProngSrc{ .multi_capture = capture_info.prong_index }; - const capture_src = raw_capture_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const capture_src = raw_capture_src.resolve(mod, mod.declPtr(block.src_decl), switch_info.src_node, .first); const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{}); errdefer msg.destroy(gpa); const raw_first_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 0 } }; - const first_item_src = raw_first_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const first_item_src = raw_first_item_src.resolve(mod, mod.declPtr(block.src_decl), switch_info.src_node, .first); const raw_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 1 + @intCast(u32, i) } }; - const item_src = raw_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); - try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(sema.mod)}); - try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(sema.mod)}); + const item_src = raw_item_src.resolve(mod, mod.declPtr(block.src_decl), 
switch_info.src_node, .first); + try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(mod)}); + try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(mod)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -10252,7 +10230,7 @@ fn zirSwitchCapture( } if (is_ref) { - const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{ + const field_ty_ptr = try Type.ptr(sema.arena, mod, .{ .pointee_type = first_field.ty, .@"addrspace" = .generic, .mutable = operand_ptr_ty.ptrIsMutable(mod), @@ -10288,8 +10266,7 @@ fn zirSwitchCapture( const item_ref = try sema.resolveInst(item); // Previous switch validation ensured this will succeed const item_val = sema.resolveConstLazyValue(block, .unneeded, item_ref, "") catch unreachable; - const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError(mod).?); - names.putAssumeCapacityNoClobber(name_ip, {}); + names.putAssumeCapacityNoClobber(item_val.getErrorName(mod).unwrap().?, {}); } const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); @@ -10299,7 +10276,7 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const item_val = sema.resolveConstLazyValue(block, .unneeded, item_ref, "") catch unreachable; - const item_ty = try mod.singleErrorSetType(item_val.getError(mod).?); + const item_ty = try mod.singleErrorSetType(item_val.getErrorName(mod).unwrap().?); return sema.bitCast(block, item_ty, operand, operand_src, null); } }, @@ -10331,7 +10308,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile if (operand_ty.zigTypeTag(mod) != .Union) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot capture tag of non-union type '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, operand_ty); @@ -10375,7 +10352,7 @@ fn zirSwitchCond( .Enum, => { if (operand_ty.isSlice(mod)) { - return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}); + return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}); } if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| { return sema.addConstant(operand_ty, opv); @@ -10389,8 +10366,8 @@ fn zirSwitchCond( const msg = msg: { const msg = try sema.errMsg(block, src, "switch on union with no attached enum", .{}); errdefer msg.destroy(sema.gpa); - if (union_ty.declSrcLocOrNull(sema.mod)) |union_src| { - try sema.mod.errNoteNonLazy(union_src, msg, "consider 'union(enum)' here", .{}); + if (union_ty.declSrcLocOrNull(mod)) |union_src| { + try mod.errNoteNonLazy(union_src, msg, "consider 'union(enum)' here", .{}); } break :msg msg; }; @@ -10410,11 +10387,11 @@ fn zirSwitchCond( .Vector, .Frame, .AnyFrame, - => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}), } } -const SwitchErrorSet = std.StringHashMap(Module.SwitchProngSrc); +const SwitchErrorSet = std.AutoHashMap(InternPool.NullTerminatedString, Module.SwitchProngSrc); fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); @@ -10593,8 +10570,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError operand_ty, i, msg, - "unhandled enumeration value: '{s}'", - .{ip.stringToSlice(field_name)}, + "unhandled enumeration value: '{}'", + .{field_name.fmt(&mod.intern_pool)}, ); } try mod.errNoteNonLazy( @@ 
-10677,8 +10654,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var maybe_msg: ?*Module.ErrorMsg = null; errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa); - for (operand_ty.errorSetNames(mod)) |error_name_ip| { - const error_name = ip.stringToSlice(error_name_ip); + for (operand_ty.errorSetNames(mod)) |error_name| { if (!seen_errors.contains(error_name) and special_prong != .@"else") { const msg = maybe_msg orelse blk: { maybe_msg = try sema.errMsg( @@ -10694,8 +10670,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, src, msg, - "unhandled error value: 'error.{s}'", - .{error_name}, + "unhandled error value: 'error.{}'", + .{error_name.fmt(ip)}, ); } } @@ -10746,11 +10722,10 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const error_names = operand_ty.errorSetNames(mod); var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, error_names.len); - for (error_names) |error_name_ip| { - const error_name = ip.stringToSlice(error_name_ip); + for (error_names) |error_name| { if (seen_errors.contains(error_name)) continue; - names.putAssumeCapacityNoClobber(error_name_ip, {}); + names.putAssumeCapacityNoClobber(error_name, {}); } // No need to keep the hash map metadata correct; here we // extract the (sorted) keys only. @@ -11500,14 +11475,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }); } for (0..operand_ty.errorSetNames(mod).len) |i| { - const error_name_ip = operand_ty.errorSetNames(mod)[i]; - const error_name = mod.intern_pool.stringToSlice(error_name_ip); + const error_name = operand_ty.errorSetNames(mod)[i]; if (seen_errors.contains(error_name)) continue; cases_len += 1; const item_val = try mod.intern(.{ .err = .{ .ty = operand_ty.toIntern(), - .name = error_name_ip, + .name = error_name, } }); const item_ref = try sema.addConstant(operand_ty, item_val.toValue()); case_block.inline_case_capture = item_ref; @@ -11754,7 +11728,7 @@ fn resolveSwitchItemVal( return val.toIntern(); } else |err| switch (err) { error.NeededSourceLocation => { - const src = switch_prong_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand); + const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, range_expand); _ = try sema.resolveConstValue(block, src, item, "switch prong values must be comptime-known"); unreachable; }, @@ -11827,7 +11801,7 @@ fn validateSwitchItemError( const ip = &sema.mod.intern_pool; const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); // TODO: Do i need to typecheck here? 
- const error_name = ip.stringToSlice(ip.indexToKey(item).err.name); + const error_name = ip.indexToKey(item).err.name; const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev| prev.value else @@ -11844,7 +11818,7 @@ fn validateSwitchDupe( ) CompileError!void { const prev_prong_src = maybe_prev_src orelse return; const mod = sema.mod; - const block_src_decl = sema.mod.declPtr(block.src_decl); + const block_src_decl = mod.declPtr(block.src_decl); const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); const prev_src = prev_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); const msg = msg: { @@ -11884,7 +11858,7 @@ fn validateSwitchItemBool( false_count.* += 1; } if (true_count.* + false_count.* > 2) { - const block_src_decl = sema.mod.declPtr(block.src_decl); + const block_src_decl = mod.declPtr(block.src_decl); const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); return sema.fail(block, src, "duplicate switch value", .{}); } @@ -12020,7 +11994,7 @@ fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Ind } if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| { if (!operand_ty.isError(mod)) return; - if (val.getError(mod) == null) return; + if (val.getErrorName(mod) == .none) return; try sema.maybeErrorUnwrapComptime(block, body, err_operand); } } @@ -12042,8 +12016,8 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I const src = inst_data.src(); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.getError(sema.mod)) |name| { - return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); + if (val.getErrorName(sema.mod).unwrap()) |name| { + return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&sema.mod.intern_pool)}); } } } @@ -12073,7 +12047,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (anon_struct.names.len != 0) { break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, field_name) != null; } else { - const field_index = std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10) catch break :hf false; + const field_index = field_name.toUnsigned(ip) orelse break :hf false; break :hf field_index < ty.structFieldCount(mod); } }, @@ -12094,7 +12068,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else => {}, } return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ - ty.fmt(sema.mod), + ty.fmt(mod), }); }; if (has_field) { @@ -12209,7 +12183,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); _ = try mod.getErrorValue(name); - const error_set_type = try mod.singleErrorSetTypeNts(name); + const error_set_type = try mod.singleErrorSetType(name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = name, @@ -12260,36 +12234,36 @@ fn zirShl( if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - const rhs_elem = try rhs_val.elemValue(sema.mod, i); + const rhs_elem = try rhs_val.elemValue(mod, i); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ - 
rhs_elem.fmtValue(scalar_ty, sema.mod), + rhs_elem.fmtValue(scalar_ty, mod), i, - scalar_ty.fmt(sema.mod), + scalar_ty.fmt(mod), }); } } } else if (rhs_val.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ - rhs_val.fmtValue(scalar_ty, sema.mod), - scalar_ty.fmt(sema.mod), + rhs_val.fmtValue(scalar_ty, mod), + scalar_ty.fmt(mod), }); } } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - const rhs_elem = try rhs_val.elemValue(sema.mod, i); + const rhs_elem = try rhs_val.elemValue(mod, i); if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ - rhs_elem.fmtValue(scalar_ty, sema.mod), + rhs_elem.fmtValue(scalar_ty, mod), i, }); } } } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ - rhs_val.fmtValue(scalar_ty, sema.mod), + rhs_val.fmtValue(scalar_ty, mod), }); } } @@ -12305,25 +12279,25 @@ fn zirShl( const val = switch (air_tag) { .shl_exact => val: { - const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod); + const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, mod); if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { break :val shifted.wrapped_result; } - if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) { + if (shifted.overflow_bit.compareAllWithZero(.eq, mod)) { break :val shifted.wrapped_result; } return sema.fail(block, src, "operation caused overflow", .{}); }, .shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) - try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod) + try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod) else - try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod), + try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, mod), .shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) - try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod) + try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod) else - try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod), + try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, mod), else => unreachable, }; @@ -12441,36 +12415,36 @@ fn zirShr( if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - const rhs_elem = try rhs_val.elemValue(sema.mod, i); + const rhs_elem = try rhs_val.elemValue(mod, i); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ - rhs_elem.fmtValue(scalar_ty, sema.mod), + rhs_elem.fmtValue(scalar_ty, mod), i, - scalar_ty.fmt(sema.mod), + scalar_ty.fmt(mod), }); } } } else if (rhs_val.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{ - rhs_val.fmtValue(scalar_ty, sema.mod), - scalar_ty.fmt(sema.mod), + rhs_val.fmtValue(scalar_ty, mod), + scalar_ty.fmt(mod), }); } } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - const rhs_elem = try rhs_val.elemValue(sema.mod, i); + const rhs_elem = try rhs_val.elemValue(mod, i); if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ - rhs_elem.fmtValue(scalar_ty, sema.mod), + 
rhs_elem.fmtValue(scalar_ty, mod), i, }); } } } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ - rhs_val.fmtValue(scalar_ty, sema.mod), + rhs_val.fmtValue(scalar_ty, mod), }); } if (maybe_lhs_val) |lhs_val| { @@ -12479,12 +12453,12 @@ fn zirShr( } if (air_tag == .shr_exact) { // Detect if any ones would be shifted out. - const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, sema.mod); + const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod); if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) { return sema.fail(block, src, "exact shift shifted out 1 bits", .{}); } } - const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, sema.mod); + const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, mod); return sema.addConstant(lhs_ty, val); } else { break :rs lhs_src; @@ -12580,9 +12554,9 @@ fn zirBitwise( if (try sema.resolveMaybeUndefValIntable(casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefValIntable(casted_rhs)) |rhs_val| { const result_val = switch (air_tag) { - .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, sema.mod), - .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, sema.mod), - .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, sema.mod), + .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, mod), + .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, mod), + .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, mod), else => unreachable, }; return sema.addConstant(resolved_type, result_val); @@ -12613,7 +12587,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (scalar_type.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{ - operand_type.fmt(sema.mod), + operand_type.fmt(mod), }); } @@ -12624,15 +12598,15 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod)).intern(scalar_type, mod); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).intern(scalar_type, mod); } return sema.addConstant(operand_type, (try mod.intern(.{ .aggregate = .{ .ty = operand_type.toIntern(), .storage = .{ .elems = elems }, } })).toValue()); } else { - const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod); + const result_val = try val.bitwiseNot(operand_type, sema.arena, mod); return sema.addConstant(operand_type, result_val); } } @@ -12949,7 +12923,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins return Type.ArrayInfo{ .elem_type = ptr_info.pointee_type, .sentinel = ptr_info.sentinel, - .len = val.sliceLen(sema.mod), + .len = val.sliceLen(mod), }; }, .One => { @@ -13195,14 +13169,14 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
.Int, .ComptimeInt, .Float, .ComptimeFloat => false, else => true, }) { - return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}); + return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)}); } if (rhs_scalar_ty.isAnyFloat()) { // We handle float negation here to ensure negative zero is represented in the bits. if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { if (rhs_val.isUndef(mod)) return sema.addConstUndef(rhs_ty); - return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod)); + return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, mod)); } try sema.requireRuntimeBlock(block, src, null); return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs); @@ -13225,7 +13199,7 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! switch (rhs_scalar_ty.zigTypeTag(mod)) { .Int, .ComptimeInt, .Float, .ComptimeFloat => {}, - else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)}), } const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))); @@ -14099,8 +14073,8 @@ fn intRem( const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(sema.mod, i); - const rhs_elem = try rhs.elemValue(sema.mod, i); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } return (try mod.intern(.{ .aggregate = .{ @@ -14499,7 +14473,7 @@ fn zirOverflowArithmetic( break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } - const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, sema.mod); + const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, mod); break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result }; } } @@ -14917,7 +14891,7 @@ fn analyzeArithmetic( } if (is_int) { var overflow_idx: ?usize = null; - const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, sema.mod); + const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); if (overflow_idx) |vec_idx| { return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx); } @@ -14925,7 +14899,7 @@ fn analyzeArithmetic( } else { return sema.addConstant( resolved_type, - try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, sema.mod), + try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs .{ .src = lhs_src, .air_tag = air_tag }; @@ -14975,7 +14949,7 @@ fn analyzeArithmetic( } return sema.addConstant( resolved_type, - try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, sema.mod), + try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, mod), ); } else break :rs .{ .src = lhs_src, .air_tag = air_tag }; } else break :rs .{ .src = rhs_src, .air_tag = air_tag }; @@ -15023,9 +14997,9 @@ fn analyzeArithmetic( } const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, sema.mod) + try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, mod) else - try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, sema.mod); + try lhs_val.intMulSat(rhs_val, 
resolved_type, sema.arena, mod); return sema.addConstant(resolved_type, val); } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat }; @@ -15118,7 +15092,7 @@ fn analyzePtrArithmetic( // non zero). const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align")); - break :t try Type.ptr(sema.arena, sema.mod, .{ + break :t try Type.ptr(sema.arena, mod, .{ .pointee_type = ptr_info.pointee_type, .sentinel = ptr_info.sentinel, .@"align" = new_align, @@ -15150,7 +15124,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -15382,7 +15356,7 @@ fn zirCmpEq( if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; - return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(sema.mod)}); + return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(mod)}); } if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) { @@ -15419,7 +15393,7 @@ fn zirCmpEq( if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); - if (lhs_as_type.eql(rhs_as_type, sema.mod) == (op == .eq)) { + if (lhs_as_type.eql(rhs_as_type, mod) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -15444,7 +15418,7 @@ fn analyzeCmpUnionTag( const msg = msg: { const msg = try sema.errMsg(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); errdefer msg.destroy(sema.gpa); - try sema.mod.errNoteNonLazy(union_ty.declSrcLoc(sema.mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(sema.mod)}); + try mod.errNoteNonLazy(union_ty.declSrcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(mod)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -15456,7 +15430,7 @@ fn analyzeCmpUnionTag( if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| { if (enum_val.isUndef(mod)) return sema.addConstUndef(Type.bool); - const field_ty = union_ty.unionFieldType(enum_val, sema.mod); + const field_ty = union_ty.unionFieldType(enum_val, mod); if (field_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } @@ -15524,7 +15498,7 @@ fn analyzeCmp( const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) { return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{ - compareOperatorName(op), resolved_type.fmt(sema.mod), + compareOperatorName(op), resolved_type.fmt(mod), }); } const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -15634,7 +15608,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
.Undefined, .Null, .Opaque, - => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(mod)}), .Type, .EnumLiteral, @@ -15677,7 +15651,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Undefined, .Null, .Opaque, - => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(mod)}), .Type, .EnumLiteral, @@ -17163,7 +17137,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi block, src, "bit shifting operation expected integer type, found '{}'", - .{operand.fmt(sema.mod)}, + .{operand.fmt(mod)}, ); } @@ -17395,7 +17369,7 @@ fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { switch (ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion, .Undefined => return, else => return sema.fail(block, src, "expected error union type, found '{}'", .{ - ty.fmt(sema.mod), + ty.fmt(mod), }), } } @@ -17521,7 +17495,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! const mod = sema.mod; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); @@ -17568,7 +17542,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const mod = sema.mod; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); @@ -17590,7 +17564,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand_ty = sema.typeOf(operand); const ptr_info = operand_ty.ptrInfo(mod); - const res_ty = try Type.ptr(sema.arena, sema.mod, .{ + const res_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = err_union_ty.errorUnionPayload(mod), .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, @@ -17693,7 +17667,7 @@ fn zirRetErrValue( _ = try mod.getErrorValue(err_name); const src = inst_data.src(); // Return the error code from the function. 
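// The change just below drops the `Nts` suffix from `singleErrorSetType`,
// presumably because error names are now always interned
// `InternPool.NullTerminatedString`s rather than raw null-terminated slices,
// and the error value itself is produced by interning an `.err` key. A
// minimal sketch of the resulting call shape, assuming
// `err_name: InternPool.NullTerminatedString`:
//
//     const error_set_type = try mod.singleErrorSetType(err_name);
//     const err_val = try mod.intern(.{ .err = .{
//         .ty = error_set_type.toIntern(),
//         .name = err_name,
//     } });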
- const error_set_type = try mod.singleErrorSetTypeNts(err_name); + const error_set_type = try mod.singleErrorSetType(err_name); const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = err_name, @@ -18003,7 +17977,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (elem_ty.zigTypeTag(mod) == .NoReturn) return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); var extra_i = extra.end; @@ -18073,10 +18047,10 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } else if (inst_data.size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); @@ -18273,7 +18247,7 @@ fn zirStructInit( return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), sema.mod)) { + if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index); } }; @@ -18307,8 +18281,8 @@ fn zirStructInit( } if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = resolved_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -18359,8 +18333,8 @@ fn finishStructInit( } } else { const field_name = anon_struct.names[i]; - const template = "missing struct field: {s}"; - const args = .{ip.stringToSlice(field_name)}; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -18379,8 +18353,8 @@ fn finishStructInit( if (field.default_val == .none) { const field_name = struct_obj.fields.keys()[i]; - const template = "missing struct field: {s}"; - const args = .{ip.stringToSlice(field_name)}; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -18396,12 +18370,12 @@ fn finishStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); + const fqn = try struct_obj.getFullyQualifiedName(mod); try mod.errNoteNonLazy( struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -18431,7 +18405,7 @@ fn finishStructInit( if (is_ref) { try sema.resolveStructLayout(struct_ty); const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const alloc_ty = try Type.ptr(sema.arena, mod, .{ 
.pointee_type = struct_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -18489,7 +18463,7 @@ fn zirStructInitAnon( const gop = fields.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { const msg = msg: { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); errdefer msg.destroy(gpa); @@ -18506,7 +18480,7 @@ fn zirStructInitAnon( field_ty.* = sema.typeOf(init).toIntern(); if (field_ty.toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -18542,7 +18516,7 @@ fn zirStructInitAnon( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const field_src = mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, field_src); unreachable; @@ -18551,8 +18525,8 @@ fn zirStructInitAnon( }; if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -18563,7 +18537,7 @@ fn zirStructInitAnon( const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; - const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const field_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), .pointee_type = field_ty.toType(), @@ -18617,7 +18591,7 @@ fn zirArrayInit( array_ty.elemType2(mod); resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const elem_src = mod.initSrc(src.node_offset.x, decl, i); _ = try sema.coerce(block, elem_ty, resolved_arg, elem_src); unreachable; @@ -18653,7 +18627,7 @@ fn zirArrayInit( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); const elem_src = mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, elem_src); unreachable; @@ -18663,8 +18637,8 @@ fn zirArrayInit( try sema.queueFullTypeResolution(array_ty); if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = array_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -18672,7 +18646,7 @@ fn zirArrayInit( if (array_ty.isTuple(mod)) { for (resolved_args, 0..) 
|arg, i| { - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), .pointee_type = array_ty.structFieldType(i, mod), @@ -18686,7 +18660,7 @@ fn zirArrayInit( return sema.makePtrConst(block, alloc); } - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), .pointee_type = array_ty.elemType2(mod), @@ -18959,8 +18933,7 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name; - const bytes = sema.mod.intern_pool.stringToSlice(err_name); - return sema.addStrLit(block, bytes); + return sema.addStrLit(block, sema.mod.intern_pool.stringToSlice(err_name)); } // Similar to zirTagName, we have a special AIR instruction for the error name in case an optimization pass @@ -19051,8 +19024,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); const tag_name = ip.indexToKey(val.toIntern()).enum_literal; - const bytes = ip.stringToSlice(tag_name); - return sema.addStrLit(block, bytes); + return sema.addStrLit(block, ip.stringToSlice(tag_name)); }, .Enum => operand_ty, .Union => operand_ty.unionTagType(mod) orelse { @@ -19083,8 +19055,8 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse { const enum_decl = mod.declPtr(enum_decl_index); const msg = msg: { - const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{s}'", .{ - val.fmtValue(enum_ty, sema.mod), ip.stringToSlice(enum_decl.name), + const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{}'", .{ + val.fmtValue(enum_ty, sema.mod), enum_decl.name.fmt(ip), }); errdefer msg.destroy(sema.gpa); try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{}); @@ -19374,8 +19346,8 @@ fn zirReify( _ = try mod.getErrorValue(name); const gop = names.getOrPutAssumeCapacity(name); if (gop.found_existing) { - return sema.fail(block, src, "duplicate error '{s}'", .{ - ip.stringToSlice(name), + return sema.fail(block, src, "duplicate error '{}'", .{ + name.fmt(ip), }); } } @@ -19487,8 +19459,8 @@ fn zirReify( if (!try sema.intFitsInType(value_val, int_tag_ty, null)) { // TODO: better source location - return sema.fail(block, src, "field '{s}' with enumeration value '{}' is too large for backing int type '{}'", .{ - ip.stringToSlice(field_name), + return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{ + field_name.fmt(ip), value_val.fmtValue(Type.comptime_int, mod), int_tag_ty.fmt(mod), }); @@ -19496,8 +19468,8 @@ fn zirReify( if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| { const msg = msg: { - const msg = try sema.errMsg(block, src, "duplicate enum field '{s}'", .{ - ip.stringToSlice(field_name), + const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{ + field_name.fmt(ip), }); errdefer msg.destroy(gpa); _ = other_index; // TODO: this note is incorrect @@ -19690,7 +19662,10 @@ fn zirReify( const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
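// The recurring pattern in the diagnostics above: instead of materializing a
// byte slice with `ip.stringToSlice(name)` and printing it with `{s}`,
// interned names are handed straight to `std.fmt` through the formatter
// returned by `NullTerminatedString.fmt`. A minimal sketch of the assumed
// shape of that API (the real declaration lives in InternPool.zig; this
// reconstruction is illustrative, not confirmed by this patch):
//
//     pub const NullTerminatedString = enum(u32) {
//         _,
//
//         const FormatData = struct {
//             string: NullTerminatedString,
//             ip: *const InternPool,
//         };
//
//         fn format(
//             data: FormatData,
//             comptime fmt_spec: []const u8,
//             _: std.fmt.FormatOptions,
//             writer: anytype,
//         ) @TypeOf(writer).Error!void {
//             const s = data.ip.stringToSlice(data.string);
//             // The "{i}" form (used in TypedValue.zig later in this patch)
//             // escapes names that are not valid Zig identifiers.
//             if (comptime std.mem.eql(u8, fmt_spec, "i")) {
//                 return writer.print("{}", .{std.zig.fmtId(s)});
//             }
//             return writer.writeAll(s);
//         }
//
//         pub fn fmt(
//             self: NullTerminatedString,
//             ip: *const InternPool,
//         ) std.fmt.Formatter(format) {
//             return .{ .data = .{ .string = self, .ip = ip } };
//         }
//     };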
const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { - const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ ip.stringToSlice(field_name), union_obj.tag_ty.fmt(mod) }); + const msg = try sema.errMsg(block, src, "no field named '{}' in enum '{}'", .{ + field_name.fmt(ip), + union_obj.tag_ty.fmt(mod), + }); errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; @@ -19706,7 +19681,7 @@ fn zirReify( const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { // TODO: better source location - return sema.fail(block, src, "duplicate union field {s}", .{ip.stringToSlice(field_name)}); + return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)}); } const field_ty = type_val.toType(); @@ -19762,8 +19737,8 @@ fn zirReify( const enum_ty = union_obj.tag_ty; for (tag_info.names, 0..) |field_name, field_index| { if (explicit_tags_seen[field_index]) continue; - try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ - ip.stringToSlice(field_name), + try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{}' missing, declared here", .{ + field_name.fmt(ip), }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -19981,14 +19956,12 @@ fn reifyStruct( const field_name = try name_val.toIpString(Type.slice_const_u8, mod); if (is_tuple) { - const field_index = std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10) catch { - return sema.fail( - block, - src, - "tuple cannot have non-numeric field '{s}'", - .{ip.stringToSlice(field_name)}, - ); - }; + const field_index = field_name.toUnsigned(ip) orelse return sema.fail( + block, + src, + "tuple cannot have non-numeric field '{}'", + .{field_name.fmt(ip)}, + ); if (field_index >= fields_len) { return sema.fail( @@ -20002,7 +19975,7 @@ fn reifyStruct( const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { // TODO: better source location - return sema.fail(block, src, "duplicate struct field {s}", .{ip.stringToSlice(field_name)}); + return sema.fail(block, src, "duplicate struct field {}", .{field_name.fmt(ip)}); } const field_ty = type_val.toType(); @@ -20443,14 +20416,14 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (maybe_operand_val) |val| { if (!dest_ty.isAnyError(mod)) { - const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.toIntern()).err.name); - if (!dest_ty.errorSetHasField(error_name, mod)) { + const error_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; + if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), error_name)) { const msg = msg: { const msg = try sema.errMsg( block, src, - "'error.{s}' not a member of error set '{}'", - .{ error_name, dest_ty.fmt(sema.mod) }, + "'error.{}' not a member of error set '{}'", + .{ error_name.fmt(ip), dest_ty.fmt(sema.mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -21448,7 +21421,7 @@ fn resolveExportOptions( block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, -) CompileError!std.builtin.ExportOptions { +) CompileError!Module.Export.Options { const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -21492,10 +21465,10 @@ fn resolveExportOptions( }); } - return std.builtin.ExportOptions{ - .name = name, + return .{ + .name = try ip.getOrPutString(gpa, name), .linkage = linkage, - .section = section, + .section = try 
ip.getOrPutStringOpt(gpa, section), .visibility = visibility, }; } @@ -22391,9 +22364,9 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const msg = try sema.errMsg( block, src, - "field '{s}' has index '{d}' but pointer value is index '{d}' of struct '{}'", + "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{ - ip.stringToSlice(field_name), + field_name.fmt(ip), field_index, field.index, parent_ty.fmt(sema.mod), @@ -23440,7 +23413,12 @@ fn resolveExternOptions( block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, -) CompileError!std.builtin.ExternOptions { +) CompileError!struct { + name: InternPool.NullTerminatedString, + library_name: InternPool.OptionalNullTerminatedString = .none, + linkage: std.builtin.GlobalLinkage = .Strong, + is_thread_local: bool = false, +} { const mod = sema.mod; const gpa = sema.gpa; const ip = &mod.intern_pool; @@ -23483,9 +23461,9 @@ fn resolveExternOptions( return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{}); } - return std.builtin.ExternOptions{ - .name = name, - .library_name = library_name, + return .{ + .name = try ip.getOrPutString(gpa, name), + .library_name = try ip.getOrPutStringOpt(gpa, library_name), .linkage = linkage, .is_thread_local = is_thread_local_val.toBool(), }; @@ -23533,7 +23511,7 @@ fn zirBuiltinExtern( const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); errdefer mod.destroyDecl(new_decl_index); const new_decl = mod.declPtr(new_decl_index); - new_decl.name = try mod.intern_pool.getOrPutString(sema.gpa, options.name); + new_decl.name = options.name; { const new_var = try mod.intern(.{ .variable = .{ @@ -24459,8 +24437,8 @@ fn fieldVal( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, @@ -24483,8 +24461,8 @@ fn fieldVal( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } } @@ -24504,8 +24482,8 @@ fn fieldVal( .error_set_type => |error_set_type| blk: { if (error_set_type.nameIndex(ip, field_name) != null) break :blk; const msg = msg: { - const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ - ip.stringToSlice(field_name), child_type.fmt(mod), + const msg = try sema.errMsg(block, src, "no error named '{}' in '{}'", .{ + field_name.fmt(ip), child_type.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, child_type); @@ -24526,7 +24504,7 @@ fn fieldVal( const error_set_type = if (!child_type.isAnyError(mod)) child_type else - try mod.singleErrorSetTypeNts(field_name); + try mod.singleErrorSetType(field_name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), .name = field_name, @@ -24646,8 +24624,8 @@ fn fieldPtr( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, @@ -24705,8 +24683,8 @@ fn fieldPtr( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, + "no member named '{}' in '{}'", + .{ 
field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, @@ -24728,8 +24706,8 @@ fn fieldPtr( if (error_set_type.nameIndex(ip, field_name) != null) { break :blk; } - return sema.fail(block, src, "no error named '{s}' in '{}'", .{ - ip.stringToSlice(field_name), child_type.fmt(mod), + return sema.fail(block, src, "no error named '{}' in '{}'", .{ + field_name.fmt(ip), child_type.fmt(mod), }); }, .inferred_error_set_type => { @@ -24747,7 +24725,7 @@ fn fieldPtr( const error_set_type = if (!child_type.isAnyError(mod)) child_type else - try mod.singleErrorSetTypeNts(field_name); + try mod.singleErrorSetType(field_name); return sema.analyzeDeclRef(try anon_decl.finish( error_set_type, (try mod.intern(.{ .err = .{ @@ -24880,10 +24858,10 @@ fn fieldCallBind( if (ip.stringEqlSlice(field_name, "len")) { return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) }; } - if (std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10)) |field_index| { + if (field_name.toUnsigned(ip)) |field_index| { if (field_index >= struct_ty.structFieldCount(mod)) break :find_field; return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr); - } else |_| {} + } } else { const max = struct_ty.structFieldCount(mod); for (0..max) |i_usize| { @@ -24982,12 +24960,15 @@ fn fieldCallBind( }; const msg = msg: { - const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ ip.stringToSlice(field_name), concrete_ty.fmt(mod) }); + const msg = try sema.errMsg(block, src, "no field or member function named '{}' in '{}'", .{ + field_name.fmt(ip), + concrete_ty.fmt(mod), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); if (found_decl) |decl_idx| { const decl = mod.declPtr(decl_idx); - try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{s}' is not a member function", .{ip.stringToSlice(field_name)}); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{}' is not a member function", .{field_name.fmt(ip)}); } break :msg msg; }; @@ -25047,8 +25028,8 @@ fn namespaceLookup( const decl = mod.declPtr(decl_index); if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ - mod.intern_pool.stringToSlice(decl_name), + const msg = try sema.errMsg(block, src, "'{}' is not marked 'pub'", .{ + decl_name.fmt(&mod.intern_pool), }); errdefer msg.destroy(gpa); try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{}); @@ -25299,21 +25280,20 @@ fn tupleFieldIndex( sema: *Sema, block: *Block, tuple_ty: Type, - field_name_ip: InternPool.NullTerminatedString, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!u32 { const mod = sema.mod; - const field_name = mod.intern_pool.stringToSlice(field_name_ip); - assert(!std.mem.eql(u8, field_name, "len")); - if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { + assert(!mod.intern_pool.stringEqlSlice(field_name, "len")); + if (field_name.toUnsigned(&mod.intern_pool)) |field_index| { if (field_index < tuple_ty.structFieldCount(mod)) return field_index; - return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ - field_name, tuple_ty.fmt(mod), + return sema.fail(block, field_name_src, "index '{}' out of bounds of tuple '{}'", .{ + field_name.fmt(&mod.intern_pool), tuple_ty.fmt(mod), }); - } else |_| {} + } - return sema.fail(block, field_name_src, "no field named '{s}' 
in tuple '{}'", .{ - field_name, tuple_ty.fmt(mod), + return sema.fail(block, field_name_src, "no field named '{}' in tuple '{}'", .{ + field_name.fmt(&mod.intern_pool), tuple_ty.fmt(mod), }); } @@ -25389,8 +25369,8 @@ fn unionFieldPtr( const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ - ip.stringToSlice(field_name), + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -25413,9 +25393,9 @@ fn unionFieldPtr( const msg = msg: { const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); - const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ - ip.stringToSlice(field_name), - ip.stringToSlice(active_field_name), + const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{ + field_name.fmt(ip), + active_field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); @@ -25486,8 +25466,8 @@ fn unionFieldVal( const msg = msg: { const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); - const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ - ip.stringToSlice(field_name), ip.stringToSlice(active_field_name), + const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{ + field_name.fmt(ip), active_field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); @@ -26595,8 +26575,8 @@ fn coerceExtra( const msg = try sema.errMsg( block, inst_src, - "no field named '{s}' in enum '{}'", - .{ mod.intern_pool.stringToSlice(string), dest_ty.fmt(mod) }, + "no field named '{}' in enum '{}'", + .{ string.fmt(&mod.intern_pool), dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -27051,9 +27031,8 @@ const InMemoryCoercionResult = union(enum) { break; }, .missing_error => |missing_errors| { - for (missing_errors) |err_index| { - const err = mod.intern_pool.stringToSlice(err_index); - try sema.errNote(block, src, msg, "'error.{s}' not a member of destination error set", .{err}); + for (missing_errors) |err| { + try sema.errNote(block, src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&mod.intern_pool)}); } break; }, @@ -28016,7 +27995,12 @@ fn storePtrVal( .bad_decl_ty, .bad_ptr_ty => { // TODO show the decl declaration site in a note and explain whether the decl // or the pointer is the problematic type - return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", .{mut_kit.ty.fmt(mod)}); + return sema.fail( + block, + src, + "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", + .{mut_kit.ty.fmt(mod)}, + ); }, } } @@ -28678,7 +28662,12 @@ fn beginComptimePtrLoad( .null_value => return sema.fail(block, src, "attempt to use null value", .{}), else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .error_union => |error_union| switch (error_union.val) { - .err_name => |err_name| return 
sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}), + .err_name => |err_name| return sema.fail( + block, + src, + "attempt to unwrap error: {}", + .{err_name.fmt(&mod.intern_pool)}, + ), .payload => |payload| payload, }, .opt => |opt| switch (opt.val) { @@ -29077,8 +29066,8 @@ fn coerceEnumToUnion( errdefer msg.destroy(sema.gpa); const field_name = union_obj.fields.keys()[field_index]; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ - ip.stringToSlice(field_name), + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -29088,14 +29077,14 @@ fn coerceEnumToUnion( const opv = (try sema.typeHasOnePossibleValue(field_ty)) orelse { const msg = msg: { const field_name = union_obj.fields.keys()[field_index]; - const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{s}'", .{ + const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{ inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), - field_ty.fmt(sema.mod), ip.stringToSlice(field_name), + field_ty.fmt(sema.mod), field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ - ip.stringToSlice(field_name), + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -29165,8 +29154,8 @@ fn coerceEnumToUnion( const field_name = field.key_ptr.*; const field_ty = field.value_ptr.ty; if (!(try sema.typeHasRuntimeBits(field_ty))) continue; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ - ip.stringToSlice(field_name), + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{ + field_name.fmt(ip), field_ty.fmt(sema.mod), }); } @@ -29522,8 +29511,8 @@ fn coerceTupleToStruct( const field = fields.values()[i]; const field_src = inst_src; // TODO better source location if (field.default_val == .none) { - const template = "missing struct field: {s}"; - const args = .{ip.stringToSlice(field_name)}; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -29666,8 +29655,8 @@ fn coerceTupleToTuple( } continue; } - const template = "missing struct field: {s}"; - const args = .{ip.stringToSlice(tuple_ty.structFieldName(i, mod))}; + const template = "missing struct field: {}"; + const args = .{tuple_ty.structFieldName(i, mod).fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -30097,7 +30086,7 @@ fn analyzeIsNonErrComptimeOnly( if (err_union.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (err_union.getError(mod) == null) { + if (err_union.getErrorName(mod) == .none) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -32824,15 +32813,16 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void extra_index += 1; // This string needs to outlive the ZIR code. 
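// (The interning below is what satisfies that lifetime requirement:
// `getOrPutString` copies the bytes into the pool's string_bytes, so even the
// arena-allocated "{d}" fallback name generated for unnamed fields outlives
// both the ZIR and Sema's arena.)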
- const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s| s else try std.fmt.allocPrint(sema.arena, "{d}", .{ - field_i, - })); + const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s| + s + else + try std.fmt.allocPrint(sema.arena, "{d}", .{field_i})); const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{ip.stringToSlice(field_name)}); + const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)}); errdefer msg.destroy(gpa); const prev_field_index = struct_obj.fields.getIndex(field_name).?; @@ -33297,8 +33287,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { if (gop.found_existing) { const msg = msg: { const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{ - ip.stringToSlice(field_name), + const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{}'", .{ + field_name.fmt(ip), }); errdefer msg.destroy(gpa); @@ -33319,8 +33309,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .index = field_i, .range = .type, }).lazy; - const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ - ip.stringToSlice(field_name), union_obj.tag_ty.fmt(mod), + const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{}' in enum '{}'", .{ + field_name.fmt(ip), union_obj.tag_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -33412,8 +33402,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const enum_ty = union_obj.tag_ty; for (tag_info.names, 0..) 
|field_name, field_index| { if (explicit_tags_seen[field_index]) continue; - try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ - ip.stringToSlice(field_name), + try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{}' missing, declared here", .{ + field_name.fmt(ip), }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -33442,22 +33432,12 @@ fn generateUnionTagTypeNumbered( ) !Type { const mod = sema.mod; const gpa = sema.gpa; - const ip = &mod.intern_pool; const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); - const name = name: { - const prefix = "@typeInfo("; - const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); - const suffix = ").Union.tag_type.?"; - const start = ip.string_bytes.items.len; - try ip.string_bytes.ensureUnusedCapacity(gpa, prefix.len + suffix.len + fqn.len); - ip.string_bytes.appendSliceAssumeCapacity(prefix); - ip.string_bytes.appendSliceAssumeCapacity(fqn); - ip.string_bytes.appendSliceAssumeCapacity(suffix); - break :name try ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); - }; + const fqn = try union_obj.getFullyQualifiedName(mod); + const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.noreturn, .val = Value.@"unreachable", @@ -33496,7 +33476,6 @@ fn generateUnionTagTypeSimple( ) !Type { const mod = sema.mod; const gpa = sema.gpa; - const ip = &mod.intern_pool; const new_decl_index = new_decl_index: { const union_obj = maybe_union_obj orelse { @@ -33508,17 +33487,8 @@ fn generateUnionTagTypeSimple( const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); - const name = name: { - const prefix = "@typeInfo("; - const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); - const suffix = ").Union.tag_type.?"; - const start = ip.string_bytes.items.len; - try ip.string_bytes.ensureUnusedCapacity(gpa, prefix.len + suffix.len + fqn.len); - ip.string_bytes.appendSliceAssumeCapacity(prefix); - ip.string_bytes.appendSliceAssumeCapacity(fqn); - ip.string_bytes.appendSliceAssumeCapacity(suffix); - break :name try ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); - }; + const fqn = try union_obj.getFullyQualifiedName(mod); + const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.noreturn, .val = Value.@"unreachable", @@ -34456,8 +34426,8 @@ fn anonStructFieldIndex( }, else => unreachable, } - return sema.fail(block, field_src, "no field named '{s}' in anonymous struct '{}'", .{ - mod.intern_pool.stringToSlice(field_name), struct_ty.fmt(sema.mod), + return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{ + field_name.fmt(&mod.intern_pool), struct_ty.fmt(sema.mod), }); } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 7faff3af019e..ec76b52d2035 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -76,6 +76,7 @@ pub fn print( ) (@TypeOf(writer).Error || Allocator.Error)!void { var val = tv.val; var ty = tv.ty; + const ip = 
&mod.intern_pool; while (true) switch (val.ip_index) { .none => switch (val.tag()) { .aggregate => return printAggregate(ty, val, writer, level, mod), @@ -87,7 +88,7 @@ pub fn print( try writer.writeAll(".{ "); try print(.{ - .ty = mod.unionPtr(mod.intern_pool.indexToKey(ty.toIntern()).union_type.index).tag_ty, + .ty = mod.unionPtr(ip.indexToKey(ty.toIntern()).union_type.index).tag_ty, .val = union_val.tag, }, writer, level - 1, mod); try writer.writeAll(" = "); @@ -174,7 +175,7 @@ pub fn print( ty = ty.optionalChild(mod); }, }, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + else => switch (ip.indexToKey(val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -200,11 +201,11 @@ pub fn print( else => return writer.writeAll(@tagName(simple_value)), }, .variable => return writer.writeAll("(variable)"), - .extern_func => |extern_func| return writer.print("(extern function '{s}')", .{ - mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name), + .extern_func => |extern_func| return writer.print("(extern function '{}')", .{ + mod.declPtr(extern_func.decl).name.fmt(ip), }), - .func => |func| return writer.print("(function '{s}')", .{ - mod.intern_pool.stringToSlice(mod.declPtr(mod.funcPtr(func.index).owner_decl).name), + .func => |func| return writer.print("(function '{}')", .{ + mod.declPtr(mod.funcPtr(func.index).owner_decl).name.fmt(ip), }), .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), @@ -215,29 +216,28 @@ pub fn print( lazy_ty.toType().abiSize(mod), }), }, - .err => |err| return writer.print("error.{s}", .{ - mod.intern_pool.stringToSlice(err.name), + .err => |err| return writer.print("error.{}", .{ + err.name.fmt(ip), }), .error_union => |error_union| switch (error_union.val) { - .err_name => |err_name| return writer.print("error.{s}", .{ - mod.intern_pool.stringToSlice(err_name), + .err_name => |err_name| return writer.print("error.{}", .{ + err_name.fmt(ip), }), .payload => |payload| { val = payload.toValue(); ty = ty.errorUnionPayload(mod); }, }, - .enum_literal => |enum_literal| return writer.print(".{s}", .{ - mod.intern_pool.stringToSlice(enum_literal), + .enum_literal => |enum_literal| return writer.print(".{}", .{ + enum_literal.fmt(ip), }), .enum_tag => |enum_tag| { if (level == 0) { return writer.writeAll("(enum)"); } - const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; - if (enum_type.tagValueIndex(&mod.intern_pool, val.toIntern())) |tag_index| { - const tag_name = mod.intern_pool.stringToSlice(enum_type.names[tag_index]); - try writer.print(".{}", .{std.zig.fmtId(tag_name)}); + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| { + try writer.print(".{i}", .{enum_type.names[tag_index].fmt(ip)}); return; } try writer.writeAll("@intToEnum("); @@ -247,7 +247,7 @@ pub fn print( }, writer, level - 1, mod); try writer.writeAll(", "); try print(.{ - .ty = mod.intern_pool.typeOf(enum_tag.int).toType(), + .ty = ip.typeOf(enum_tag.int).toType(), .val = enum_tag.int.toValue(), }, writer, level - 1, mod); try writer.writeAll(")"); @@ -259,13 +259,13 @@ pub fn print( }, .ptr => |ptr| { if (ptr.addr == .int) { - const i = mod.intern_pool.indexToKey(ptr.addr.int).int; + const i = ip.indexToKey(ptr.addr.int).int; switch (i.storage) { inline else => |addr| return writer.print("{x:0>8}", .{addr}), } } - const ptr_ty = mod.intern_pool.indexToKey(ty.toIntern()).ptr_type; + const ptr_ty = 
ip.indexToKey(ty.toIntern()).ptr_type; if (ptr_ty.flags.size == .Slice) { if (level == 0) { return writer.writeAll(".{ ... }"); @@ -301,7 +301,7 @@ pub fn print( switch (ptr.addr) { .decl => |decl_index| { const decl = mod.declPtr(decl_index); - if (level == 0) return writer.print("(decl '{s}')", .{mod.intern_pool.stringToSlice(decl.name)}); + if (level == 0) return writer.print("(decl '{}')", .{decl.name.fmt(ip)}); return print(.{ .ty = decl.ty, .val = decl.val, @@ -309,7 +309,7 @@ pub fn print( }, .mut_decl => |mut_decl| { const decl = mod.declPtr(mut_decl.decl); - if (level == 0) return writer.print("(mut decl '{s}')", .{mod.intern_pool.stringToSlice(decl.name)}); + if (level == 0) return writer.print("(mut decl '{}')", .{decl.name.fmt(ip)}); return print(.{ .ty = decl.ty, .val = decl.val, @@ -317,7 +317,7 @@ pub fn print( }, .comptime_field => |field_val_ip| { return print(.{ - .ty = mod.intern_pool.typeOf(field_val_ip).toType(), + .ty = ip.typeOf(field_val_ip).toType(), .val = field_val_ip.toValue(), }, writer, level - 1, mod); }, @@ -325,27 +325,27 @@ pub fn print( .eu_payload => |eu_ip| { try writer.writeAll("(payload of "); try print(.{ - .ty = mod.intern_pool.typeOf(eu_ip).toType(), + .ty = ip.typeOf(eu_ip).toType(), .val = eu_ip.toValue(), }, writer, level - 1, mod); try writer.writeAll(")"); }, .opt_payload => |opt_ip| { try print(.{ - .ty = mod.intern_pool.typeOf(opt_ip).toType(), + .ty = ip.typeOf(opt_ip).toType(), .val = opt_ip.toValue(), }, writer, level - 1, mod); try writer.writeAll(".?"); }, .elem => |elem| { try print(.{ - .ty = mod.intern_pool.typeOf(elem.base).toType(), + .ty = ip.typeOf(elem.base).toType(), .val = elem.base.toValue(), }, writer, level - 1, mod); try writer.print("[{}]", .{elem.index}); }, .field => |field| { - const container_ty = mod.intern_pool.typeOf(field.base).toType(); + const container_ty = ip.typeOf(field.base).toType(); try print(.{ .ty = container_ty, .val = field.base.toValue(), @@ -356,14 +356,12 @@ pub fn print( if (container_ty.isTuple(mod)) { try writer.print("[{d}]", .{field.index}); } - const field_name_ip = container_ty.structFieldName(@intCast(usize, field.index), mod); - const field_name = mod.intern_pool.stringToSlice(field_name_ip); - try writer.print(".{}", .{std.zig.fmtId(field_name)}); + const field_name = container_ty.structFieldName(@intCast(usize, field.index), mod); + try writer.print(".{i}", .{field_name.fmt(ip)}); }, .Union => { - const field_name_ip = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)]; - const field_name = mod.intern_pool.stringToSlice(field_name_ip); - try writer.print(".{}", .{std.zig.fmtId(field_name)}); + const field_name = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)]; + try writer.print(".{i}", .{field_name.fmt(ip)}); }, .Pointer => { std.debug.assert(container_ty.isSlice(mod)); @@ -440,9 +438,7 @@ fn printAggregate( else => unreachable, }; - if (field_name.unwrap()) |name_ip| try writer.print(".{s} = ", .{ - mod.intern_pool.stringToSlice(name_ip), - }); + if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(&mod.intern_pool)}); try print(.{ .ty = ty.structFieldType(i, mod), .val = try val.fieldValue(mod, i), diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 8d2ba2bbb880..c1b7bd72b1ba 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1850,9 +1850,9 @@ pub const DeclGen = struct { try mod.markDeclAlive(decl); if (mod.decl_exports.get(decl_index)) |exports| { - try 
writer.writeAll(mod.intern_pool.stringToSlice(exports.items[export_index].name)); + try writer.print("{}", .{exports.items[export_index].opts.name.fmt(&mod.intern_pool)}); } else if (decl.isExtern(mod)) { - try writer.writeAll(mod.intern_pool.stringToSlice(decl.name)); + try writer.print("{}", .{decl.name.fmt(&mod.intern_pool)}); } else { // MSVC has a 4095 character token length limit, and fmtIdent can (worst case) // expand to 3x the length of its input, but let's cut it off at a much shorter limit. @@ -2481,8 +2481,8 @@ fn genExports(o: *Object) !void { try fwd_decl_writer.writeAll("zig_export("); try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) }); try fwd_decl_writer.print(", {s}, {s});\n", .{ - fmtStringLiteral(ip.stringToSlice(exports.items[0].name), null), - fmtStringLiteral(ip.stringToSlice(@"export".name), null), + fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null), + fmtStringLiteral(ip.stringToSlice(@"export".opts.name), null), }); } } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8eb5f8c6833f..47be4148d3b0 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -687,11 +687,9 @@ pub const Object = struct { for (export_list.items) |exp| { // Detect if the LLVM global has already been created as an extern. In such // case, we need to replace all uses of it with this exported global. - // TODO update std.builtin.ExportOptions to have the name be a - // null-terminated slice. - const exp_name_z = mod.intern_pool.stringToSlice(exp.name); + const exp_name = mod.intern_pool.stringToSlice(exp.opts.name); - const other_global = object.getLlvmGlobal(exp_name_z.ptr) orelse continue; + const other_global = object.getLlvmGlobal(exp_name.ptr) orelse continue; if (other_global == llvm_global) continue; other_global.replaceAllUsesWith(llvm_global); @@ -1320,7 +1318,7 @@ pub const Object = struct { } } } else if (exports.len != 0) { - const exp_name = mod.intern_pool.stringToSlice(exports[0].name); + const exp_name = mod.intern_pool.stringToSlice(exports[0].opts.name); llvm_global.setValueName2(exp_name.ptr, exp_name.len); llvm_global.setUnnamedAddr(.False); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); @@ -1335,18 +1333,18 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - switch (exports[0].linkage) { + switch (exports[0].opts.linkage) { .Internal => unreachable, .Strong => llvm_global.setLinkage(.External), .Weak => llvm_global.setLinkage(.WeakODR), .LinkOnce => llvm_global.setLinkage(.LinkOnceODR), } - switch (exports[0].visibility) { + switch (exports[0].opts.visibility) { .default => llvm_global.setVisibility(.Default), .hidden => llvm_global.setVisibility(.Hidden), .protected => llvm_global.setVisibility(.Protected), } - if (mod.intern_pool.stringToSliceUnwrap(exports[0].section)) |section| { + if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section| { llvm_global.setSection(section); } if (decl.val.getVariable(mod)) |variable| { @@ -1362,7 +1360,7 @@ pub const Object = struct { // Until then we iterate over existing aliases and make them point // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
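// Alias names are now drawn from `exp.opts.name`, matching the reworked
// export options elsewhere in this patch; `stringToSlice` is still needed at
// this boundary because LLVM's C API consumes raw pointer/length pairs, not
// InternPool handles.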
for (exports[1..]) |exp| { - const exp_name_z = mod.intern_pool.stringToSlice(exp.name); + const exp_name_z = mod.intern_pool.stringToSlice(exp.opts.name); if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| { alias.setAliasee(llvm_global); @@ -2539,10 +2537,10 @@ pub const DeclGen = struct { const fn_type = try dg.lowerType(zig_fn_type); - const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); + const fqn = try decl.getFullyQualifiedName(mod); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); + const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(mod.intern_pool.stringToSlice(fqn), fn_type, llvm_addrspace); gop.value_ptr.* = llvm_fn; const is_extern = decl.isExtern(mod); @@ -2693,7 +2691,7 @@ pub const DeclGen = struct { const mod = dg.module; const decl = mod.declPtr(decl_index); - const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); + const fqn = try decl.getFullyQualifiedName(mod); const target = mod.getTarget(); @@ -2702,7 +2700,7 @@ pub const DeclGen = struct { const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace( llvm_type, - fqn, + mod.intern_pool.stringToSlice(fqn), llvm_actual_addrspace, ); gop.value_ptr.* = llvm_global; @@ -5942,6 +5940,8 @@ pub const FuncGen = struct { .base_line = self.base_line, }); + const fqn = try decl.getFullyQualifiedName(mod); + const is_internal_linkage = !mod.decl_exports.contains(decl_index); const fn_ty = try mod.funcType(.{ .param_types = &.{}, @@ -5959,11 +5959,10 @@ pub const FuncGen = struct { .addrspace_is_generic = false, }); const fn_di_ty = try self.dg.object.lowerDebugType(fn_ty, .full); - const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const subprogram = dib.createFunction( di_file.toScope(), mod.intern_pool.stringToSlice(decl.name), - fqn, + mod.intern_pool.stringToSlice(fqn), di_file, line_number, fn_di_ty, @@ -8661,8 +8660,8 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod)); - const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn}); + const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); + const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{}", .{fqn.fmt(&mod.intern_pool)}); const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())}; @@ -8733,8 +8732,8 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod)); - const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); + const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); + const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(&mod.intern_pool)}); const slice_ty = Type.slice_const_u8_sentinel_0; const llvm_ret_ty = try self.dg.lowerType(slice_ty); diff --git a/src/link/C.zig b/src/link/C.zig index 8bfaf1553ce2..9a42daa0610f 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -294,7 +294,7 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo defer export_names.deinit(gpa); try export_names.ensureTotalCapacity(gpa, @intCast(u32, 
module.decl_exports.entries.len)); for (module.decl_exports.values()) |exports| for (exports.items) |@"export"| - try export_names.put(gpa, @"export".name, {}); + try export_names.put(gpa, @"export".opts.name, {}); while (f.remaining_decls.popOrNull()) |kv| { const decl_index = kv.key; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index fec6a86b9133..f7785858ddad 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1430,20 +1430,20 @@ pub fn updateDeclExports( else => std.builtin.CallingConvention.C, }; const decl_cc = exported_decl.ty.fnCallingConvention(mod); - if (decl_cc == .C and ip.stringEqlSlice(exp.name, "main") and + if (decl_cc == .C and ip.stringEqlSlice(exp.opts.name, "main") and self.base.options.link_libc) { mod.stage1_flags.have_c_main = true; } else if (decl_cc == winapi_cc and self.base.options.target.os.tag == .windows) { - if (ip.stringEqlSlice(exp.name, "WinMain")) { + if (ip.stringEqlSlice(exp.opts.name, "WinMain")) { mod.stage1_flags.have_winmain = true; - } else if (ip.stringEqlSlice(exp.name, "wWinMain")) { + } else if (ip.stringEqlSlice(exp.opts.name, "wWinMain")) { mod.stage1_flags.have_wwinmain = true; - } else if (ip.stringEqlSlice(exp.name, "WinMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.opts.name, "WinMainCRTStartup")) { mod.stage1_flags.have_winmain_crt_startup = true; - } else if (ip.stringEqlSlice(exp.name, "wWinMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.opts.name, "wWinMainCRTStartup")) { mod.stage1_flags.have_wwinmain_crt_startup = true; - } else if (ip.stringEqlSlice(exp.name, "DllMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.opts.name, "DllMainCRTStartup")) { mod.stage1_flags.have_dllmain_crt_startup = true; } } @@ -1461,10 +1461,9 @@ pub fn updateDeclExports( const decl_metadata = self.decls.getPtr(decl_index).?; for (exports) |exp| { - const exp_name = mod.intern_pool.stringToSlice(exp.name); - log.debug("adding new export '{s}'", .{exp_name}); + log.debug("adding new export '{}'", .{exp.opts.name.fmt(&mod.intern_pool)}); - if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { + if (mod.intern_pool.stringToSliceUnwrap(exp.opts.section)) |section_name| { if (!mem.eql(u8, section_name, ".text")) { try mod.failed_exports.putNoClobber( gpa, @@ -1480,7 +1479,7 @@ pub fn updateDeclExports( } } - if (exp.linkage == .LinkOnce) { + if (exp.opts.linkage == .LinkOnce) { try mod.failed_exports.putNoClobber( gpa, exp, @@ -1494,19 +1493,19 @@ pub fn updateDeclExports( continue; } - const sym_index = decl_metadata.getExport(self, exp_name) orelse blk: { + const sym_index = decl_metadata.getExport(self, mod.intern_pool.stringToSlice(exp.opts.name)) orelse blk: { const sym_index = try self.allocateSymbol(); try decl_metadata.exports.append(gpa, sym_index); break :blk sym_index; }; const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; const sym = self.getSymbolPtr(sym_loc); - try self.setSymbolName(sym, exp_name); + try self.setSymbolName(sym, mod.intern_pool.stringToSlice(exp.opts.name)); sym.value = decl_sym.value; sym.section_number = @intToEnum(coff.SectionNumber, self.text_section_index.? 
+ 1); sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL }; - switch (exp.linkage) { + switch (exp.opts.linkage) { .Strong => { sym.storage_class = .EXTERNAL; }, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 5ac90d4cae17..409eca6e7a09 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2879,9 +2879,9 @@ pub fn updateDeclExports( try self.global_symbols.ensureUnusedCapacity(gpa, exports.len); for (exports) |exp| { - const exp_name = mod.intern_pool.stringToSlice(exp.name); - if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { - if (!mem.eql(u8, section_name, ".text")) { + const exp_name = mod.intern_pool.stringToSlice(exp.opts.name); + if (exp.opts.section.unwrap()) |section_name| { + if (!mod.intern_pool.stringEqlSlice(section_name, ".text")) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber( exp, @@ -2890,7 +2890,7 @@ pub fn updateDeclExports( continue; } } - const stb_bits: u8 = switch (exp.linkage) { + const stb_bits: u8 = switch (exp.opts.linkage) { .Internal => elf.STB_LOCAL, .Strong => blk: { const entry_name = self.base.options.entry orelse "_start"; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 70993e8dc6d8..a3f67bc70a4d 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -2401,15 +2401,15 @@ pub fn updateDeclExports( const decl_metadata = self.decls.getPtr(decl_index).?; for (exports) |exp| { - const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{ - mod.intern_pool.stringToSlice(exp.name), + const exp_name = try std.fmt.allocPrint(gpa, "_{}", .{ + exp.opts.name.fmt(&mod.intern_pool), }); defer gpa.free(exp_name); log.debug("adding new export '{s}'", .{exp_name}); - if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { - if (!mem.eql(u8, section_name, "__text")) { + if (exp.opts.section.unwrap()) |section_name| { + if (!mod.intern_pool.stringEqlSlice(section_name, "__text")) { try mod.failed_exports.putNoClobber( mod.gpa, exp, @@ -2424,7 +2424,7 @@ pub fn updateDeclExports( } } - if (exp.linkage == .LinkOnce) { + if (exp.opts.linkage == .LinkOnce) { try mod.failed_exports.putNoClobber( mod.gpa, exp, @@ -2453,7 +2453,7 @@ pub fn updateDeclExports( .n_value = decl_sym.n_value, }; - switch (exp.linkage) { + switch (exp.opts.linkage) { .Internal => { // Symbol should be hidden, or in MachO lingo, private extern. // We should also mark the symbol as Weak: n_desc == N_WEAK_DEF. 
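The linker backends in this patch all read export metadata through the new `exp.opts` field. Pieced together from `resolveExportOptions` and these call sites, the options struct plausibly looks like the sketch below; the exact declaration lives in src/Module.zig, and the field defaults here are assumptions, not confirmed by this patch:

    // Hypothetical reconstruction of Module.Export.Options; only the four
    // fields visible in this patch are shown.
    pub const Options = struct {
        name: InternPool.NullTerminatedString,
        linkage: std.builtin.GlobalLinkage = .Strong,
        section: InternPool.OptionalNullTerminatedString = .none,
        visibility: std.builtin.SymbolVisibility = .default,
    };

Keeping the name and section interned lets backends compare them with `stringEqlSlice` (as the Elf hunk above and the Plan9 hunk below do) without allocating.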
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 5cf2add528d5..2606dd7aac78 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -725,10 +725,10 @@ fn addDeclExports( const decl_block = self.getDeclBlock(metadata.index); for (exports) |exp| { - const exp_name = mod.intern_pool.stringToSlice(exp.name); + const exp_name = mod.intern_pool.stringToSlice(exp.opts.name); // plan9 does not support custom sections - if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { - if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { + if (exp.opts.section.unwrap()) |section_name| { + if (!mod.intern_pool.stringEqlSlice(section_name, ".text") and !mod.intern_pool.stringEqlSlice(section_name, ".data")) { try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( self.base.allocator, mod.declPtr(decl_index).srcLoc(mod), @@ -972,7 +972,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const sym = self.syms.items[decl_block.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.name))) |exp_i| { + for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| { try self.writeSym(writer, self.syms.items[exp_i]); }; } @@ -998,7 +998,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const sym = self.syms.items[decl_block.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.name))) |exp_i| { + for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| { const s = self.syms.items[exp_i]; if (mem.eql(u8, s.name, "_start")) self.entry_val = s.value; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index fbde464c5457..5bbd5ebdc055 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -147,7 +147,7 @@ pub fn updateDeclExports( const spv_decl_index = entry.value_ptr.*; for (exports) |exp| { - try self.spv.declareEntryPoint(spv_decl_index, mod.intern_pool.stringToSlice(exp.name)); + try self.spv.declareEntryPoint(spv_decl_index, mod.intern_pool.stringToSlice(exp.opts.name)); } } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 35972d2fec1b..fdac7dfa634e 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1706,7 +1706,7 @@ pub fn updateDeclExports( const gpa = mod.gpa; for (exports) |exp| { - if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section| { + if (mod.intern_pool.stringToSliceUnwrap(exp.opts.section)) |section| { try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( gpa, decl.srcLoc(mod), @@ -1716,12 +1716,12 @@ pub fn updateDeclExports( continue; } - const export_name = try wasm.string_table.put(wasm.base.allocator, mod.intern_pool.stringToSlice(exp.name)); + const export_name = try wasm.string_table.put(wasm.base.allocator, mod.intern_pool.stringToSlice(exp.opts.name)); if (wasm.globals.getPtr(export_name)) |existing_loc| { if (existing_loc.index == atom.sym_index) continue; const existing_sym: Symbol = existing_loc.getSymbol(wasm).*; - const exp_is_weak = exp.linkage == .Internal or exp.linkage == .Weak; + const exp_is_weak = exp.opts.linkage == .Internal or exp.opts.linkage == .Weak; // When both the to-be-exported symbol and the already existing symbol // are strong symbols, we have a linker 
error. // In the other case we replace one with the other. @@ -1729,11 +1729,11 @@ pub fn updateDeclExports( try mod.failed_exports.put(gpa, exp, try Module.ErrorMsg.create( gpa, decl.srcLoc(mod), - \\LinkError: symbol '{s}' defined multiple times + \\LinkError: symbol '{}' defined multiple times \\ first definition in '{s}' \\ next definition in '{s}' , - .{ mod.intern_pool.stringToSlice(exp.name), wasm.name, wasm.name }, + .{ exp.opts.name.fmt(&mod.intern_pool), wasm.name, wasm.name }, )); continue; } else if (exp_is_weak) { @@ -1750,7 +1750,7 @@ pub fn updateDeclExports( const exported_atom = wasm.getAtom(exported_atom_index); const sym_loc = exported_atom.symbolLoc(); const symbol = sym_loc.getSymbol(wasm); - switch (exp.linkage) { + switch (exp.opts.linkage) { .Internal => { symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); }, @@ -1769,7 +1769,7 @@ pub fn updateDeclExports( }, } // Ensure the symbol will be exported using the given name - if (!mod.intern_pool.stringEqlSlice(exp.name, sym_loc.getName(wasm))) { + if (!mod.intern_pool.stringEqlSlice(exp.opts.name, sym_loc.getName(wasm))) { try wasm.export_names.put(wasm.base.allocator, sym_loc, export_name); } diff --git a/src/print_air.zig b/src/print_air.zig index eb104772920a..f963ecdd9524 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -685,9 +685,8 @@ const Writer = struct { fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_fn = w.air.instructions.items(.data)[inst].ty_fn; const func_index = ty_fn.func; - const ip = &w.module.intern_pool; const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl); - try s.print("{s}", .{ip.stringToSlice(owner_decl.name)}); + try s.print("{}", .{owner_decl.name.fmt(&w.module.intern_pool)}); } fn writeDbgVar(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { diff --git a/src/type.zig b/src/type.zig index f9065a2e39d7..4269ee56d394 100644 --- a/src/type.zig +++ b/src/type.zig @@ -284,7 +284,7 @@ pub const Type = struct { try writer.writeAll("error{"); for (names, 0..) 
|name, i| { if (i != 0) try writer.writeByte(','); - try writer.writeAll(mod.intern_pool.stringToSlice(name)); + try writer.print("{}", .{name.fmt(&mod.intern_pool)}); } try writer.writeAll("}"); }, @@ -341,7 +341,7 @@ pub const Type = struct { try decl.renderFullyQualifiedName(mod, writer); } else if (struct_type.namespace.unwrap()) |namespace_index| { const namespace = mod.namespacePtr(namespace_index); - try namespace.renderFullyQualifiedName(mod, "", writer); + try namespace.renderFullyQualifiedName(mod, .empty, writer); } else { try writer.writeAll("@TypeOf(.{})"); } @@ -357,9 +357,7 @@ pub const Type = struct { try writer.writeAll("comptime "); } if (anon_struct.names.len != 0) { - const name = mod.intern_pool.stringToSlice(anon_struct.names[i]); - try writer.writeAll(name); - try writer.writeAll(": "); + try writer.print("{}: ", .{anon_struct.names[i].fmt(&mod.intern_pool)}); } try print(field_ty.toType(), writer, mod); diff --git a/src/value.zig b/src/value.zig index dbf25324e5cc..d3f15121b85f 100644 --- a/src/value.zig +++ b/src/value.zig @@ -525,23 +525,6 @@ pub const Value = struct { }; } - pub fn tagName(val: Value, mod: *Module) []const u8 { - const ip = &mod.intern_pool; - const enum_tag = switch (ip.indexToKey(val.toIntern())) { - .un => |un| ip.indexToKey(un.tag).enum_tag, - .enum_tag => |x| x, - .enum_literal => |name| return ip.stringToSlice(name), - else => unreachable, - }; - const enum_type = ip.indexToKey(enum_tag.ty).enum_type; - const field_index = field_index: { - const field_index = enum_type.tagValueIndex(ip, val.toIntern()).?; - break :field_index @intCast(u32, field_index); - }; - const field_name = enum_type.names[field_index]; - return ip.stringToSlice(field_name); - } - /// Asserts the value is an integer. pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { return val.toBigIntAdvanced(space, mod, null) catch unreachable; @@ -2092,33 +2075,23 @@ pub const Value = struct { }; } - /// Valid only for error (union) types. Asserts the value is not undefined and not - /// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether - /// something is an error or not because it works without having to figure out the - /// string. - pub fn getError(val: Value, mod: *const Module) ?[]const u8 { - return switch (getErrorName(val, mod)) { - .empty => null, - else => |s| mod.intern_pool.stringToSlice(s), - }; - } - - pub fn getErrorName(val: Value, mod: *const Module) InternPool.NullTerminatedString { + /// Valid only for error (union) types. Asserts the value is not undefined and not unreachable. + pub fn getErrorName(val: Value, mod: *const Module) InternPool.OptionalNullTerminatedString { return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .err => |err| err.name, + .err => |err| err.name.toOptional(), .error_union => |error_union| switch (error_union.val) { - .err_name => |err_name| err_name, - .payload => .empty, + .err_name => |err_name| err_name.toOptional(), + .payload => .none, }, else => unreachable, }; } pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt { - return switch (getErrorName(val, mod)) { - .empty => 0, - else => |s| @intCast(Module.ErrorInt, mod.global_error_set.getIndex(s).?), - }; + return if (getErrorName(val, mod).unwrap()) |err_name| + @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err_name).?) + else + 0; } /// Assumes the type is an error union. 
Returns true if and only if the value is From 028f2ed30f3318fbe1d2274962d7cda111c695b6 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 12 Jun 2023 03:15:57 -0400 Subject: [PATCH 197/205] InternPool: fix one more compile error on 32-bit targets --- src/InternPool.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index d5cf2d3fbfc3..c208fcf18afb 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -5294,7 +5294,7 @@ pub fn getOrPutStringFmt( args: anytype, ) Allocator.Error!NullTerminatedString { // ensure that references to string_bytes in args do not get invalidated - const len = std.fmt.count(format, args) + 1; + const len = @intCast(usize, std.fmt.count(format, args) + 1); try ip.string_bytes.ensureUnusedCapacity(gpa, len); ip.string_bytes.writer(undefined).print(format, args) catch unreachable; ip.string_bytes.appendAssumeCapacity(0); From 9e61ba19e9cda344aa2094e9a671d98076164163 Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 12 Jun 2023 22:00:56 +0100 Subject: [PATCH 198/205] std.crypto.tls.Client: fix @memcpy crash in limitedOverlapCopy Resolves: #15928 --- lib/std/crypto/tls/Client.zig | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 2745bd4e6fab..5b9b00538a4d 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -1256,10 +1256,8 @@ fn limitedOverlapCopy(frag: []u8, in: usize) void { // A single, non-overlapping memcpy suffices. @memcpy(frag[0..first.len], first); } else { - // Need two memcpy calls because one alone would overlap. - @memcpy(frag[0..in], first[0..in]); - const leftover = first.len - in; - @memcpy(frag[in..][0..leftover], first[in..][0..leftover]); + // One memcpy call would overlap, so just do this instead. + std.mem.copyForwards(u8, frag, first); } } From 415dbe93d44a5f86deecc1501185d6bfd34f80b4 Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 12 Jun 2023 22:01:33 +0100 Subject: [PATCH 199/205] link: fix compile error with only-c --- src/link.zig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/link.zig b/src/link.zig index e43153f0b1fb..c184f7ed7cbb 100644 --- a/src/link.zig +++ b/src/link.zig @@ -562,10 +562,9 @@ pub const File = struct { /// May be called before or after updateDeclExports for any given Decl. pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void { - const func = module.funcPtr(func_index); if (build_options.only_c) { assert(base.tag == .c); - return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness); + return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness); } switch (base.tag) { // zig fmt: off From 1ec14988e146b185d87b09c62a1aa715a49047ab Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 12 Jun 2023 22:02:59 +0100 Subject: [PATCH 200/205] build: add -Dno-bin option This is useful for development, as it speeds up the process of getting semantic analysis errors significantly. --- build.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build.zig b/build.zig index 847303340deb..9cfebebc562b 100644 --- a/build.zig +++ b/build.zig @@ -30,6 +30,7 @@ pub fn build(b: *std.Build) !void { const test_step = b.step("test", "Run all the tests"); const skip_install_lib_files = b.option(bool, "no-lib", "skip copying of lib/ files and langref to installation prefix. 
Useful for development") orelse false; const skip_install_langref = b.option(bool, "no-langref", "skip copying of langref to the installation prefix") orelse skip_install_lib_files; + const no_bin = b.option(bool, "no-bin", "skip emitting compiler binary") orelse false; const docgen_exe = b.addExecutable(.{ .name = "docgen", @@ -166,6 +167,7 @@ pub fn build(b: *std.Build) !void { exe.pie = pie; exe.sanitize_thread = sanitize_thread; exe.entitlements = entitlements; + if (no_bin) exe.emit_bin = .no_emit; exe.build_id = b.option( std.Build.Step.Compile.BuildId, From 52e7934a21e29b5a39fa207ef29520f58e311bb0 Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 12 Jun 2023 22:17:41 +0100 Subject: [PATCH 201/205] std.dwarf: fix findCompileUnit when ranges offset is given by const --- lib/std/dwarf.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index dbe1f6e8b613..4de08b25d7c6 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -936,6 +936,7 @@ pub const DwarfInfo = struct { const ranges_val = compile_unit.die.getAttr(AT.ranges) orelse continue; const ranges_offset = switch (ranges_val.*) { .SecOffset => |off| off, + .Const => |c| try c.asUnsignedLe(), .RangeListOffset => |idx| off: { if (compile_unit.is_64) { const offset_loc = @intCast(usize, compile_unit.rnglists_base + 8 * idx); From 3d48c406c18d6bcc579130d7cac91d47cc119dd8 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 12 Jun 2023 20:30:11 -0400 Subject: [PATCH 202/205] Sema: redo monomorphed funcs to make more sense By correctly handling comptime-only types appearing in non-comptime parameters (when the parameter is either anytype or generic), this avoids an index out of bounds later when later filling out `monomorphed_args` using what used to be slightly different logic. 
--- src/Sema.zig | 180 ++++++++++++++++++++++++--------------------------- 1 file changed, 86 insertions(+), 94 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index cc79578931b3..c2b7467ed6fa 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6748,7 +6748,7 @@ fn analyzeCall( func, func_src, call_src, - func_ty_info, + func_ty, ensure_result_used, uncasted_args, call_tag, @@ -7367,8 +7367,16 @@ fn analyzeGenericCallArg( } } -fn analyzeGenericCallArgVal(sema: *Sema, block: *Block, arg_src: LazySrcLoc, uncasted_arg: Air.Inst.Ref) !Value { - return sema.resolveLazyValue(try sema.resolveValue(block, arg_src, uncasted_arg, "parameter is comptime")); +fn analyzeGenericCallArgVal( + sema: *Sema, + block: *Block, + arg_src: LazySrcLoc, + arg_ty: Type, + uncasted_arg: Air.Inst.Ref, + reason: []const u8, +) !Value { + const casted_arg = try sema.coerce(block, arg_ty, uncasted_arg, arg_src); + return sema.resolveLazyValue(try sema.resolveValue(block, arg_src, casted_arg, reason)); } fn instantiateGenericCall( @@ -7377,7 +7385,7 @@ fn instantiateGenericCall( func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, - func_ty_info: InternPool.Key.FuncType, + generic_func_ty: Type, ensure_result_used: bool, uncasted_args: []const Air.Inst.Ref, call_tag: Air.Inst.Tag, @@ -7404,24 +7412,25 @@ fn instantiateGenericCall( const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); const zir_tags = fn_zir.instructions.items(.tag); - const generic_args = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); + const monomorphed_args = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(generic_func_ty).?.param_types.len); const callee_index = callee: { var arg_i: usize = 0; - var generic_arg_i: u32 = 0; + var monomorphed_arg_i: u32 = 0; var known_unique = false; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7431,69 +7440,60 @@ fn instantiateGenericCall( } defer arg_i += 1; + const param_ty = generic_func_ty_info.param_types[arg_i]; + const is_generic = !is_anytype and param_ty == .generic_poison_type; + if (known_unique) { - if (is_comptime or is_anytype) { - generic_arg_i += 1; + if (is_comptime or is_anytype or is_generic) { + monomorphed_arg_i += 1; } continue; } - const arg_ty = sema.typeOf(uncasted_args[arg_i]); + const uncasted_arg = uncasted_args[arg_i]; + const arg_ty = if (is_generic) mod.monomorphed_funcs.getAdapted( + Module.MonomorphedFuncAdaptedKey{ + .func = module_fn_index, + .args = monomorphed_args[0..monomorphed_arg_i], + }, + Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, + ) orelse { + known_unique = true; + monomorphed_arg_i += 1; + continue; + } else if (is_anytype) sema.typeOf(uncasted_arg).toIntern() else param_ty; + const was_comptime = is_comptime; + if (!is_comptime and try sema.typeRequiresComptime(arg_ty.toType())) is_comptime = true; if (is_comptime or is_anytype) { // Tuple default values are a part of the type and need to be // resolved to hash the type. 
- try sema.resolveTupleLazyValues(block, call_src, arg_ty); + try sema.resolveTupleLazyValues(block, call_src, arg_ty.toType()); } if (is_comptime) { - const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[arg_i]) catch |err| switch (err) { + const casted_arg = sema.analyzeGenericCallArgVal(block, .unneeded, arg_ty.toType(), uncasted_arg, "") catch |err| switch (err) { error.NeededSourceLocation => { const decl = mod.declPtr(block.src_decl); const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); - _ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[arg_i]); + _ = try sema.analyzeGenericCallArgVal( + block, + arg_src, + arg_ty.toType(), + uncasted_arg, + if (was_comptime) + "parameter is comptime" + else + "argument to parameter with comptime-only type must be comptime-known", + ); unreachable; }, else => |e| return e, }; - - if (is_anytype) { - generic_args[generic_arg_i] = arg_val.toIntern(); - } else { - const final_arg_ty = mod.monomorphed_funcs.getAdapted( - Module.MonomorphedFuncAdaptedKey{ - .func = module_fn_index, - .args = generic_args[0..generic_arg_i], - }, - Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, - ) orelse { - known_unique = true; - generic_arg_i += 1; - continue; - }; - const casted_arg = sema.coerce(block, final_arg_ty.toType(), uncasted_args[arg_i], .unneeded) catch |err| switch (err) { - error.NeededSourceLocation => { - const decl = mod.declPtr(block.src_decl); - const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); - _ = try sema.coerce(block, final_arg_ty.toType(), uncasted_args[arg_i], arg_src); - unreachable; - }, - else => |e| return e, - }; - const casted_arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, casted_arg) catch |err| switch (err) { - error.NeededSourceLocation => { - const decl = mod.declPtr(block.src_decl); - const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); - _ = try sema.analyzeGenericCallArgVal(block, arg_src, casted_arg); - unreachable; - }, - else => |e| return e, - }; - generic_args[generic_arg_i] = casted_arg_val.toIntern(); - } - generic_arg_i += 1; - } else if (is_anytype) { - generic_args[generic_arg_i] = arg_ty.toIntern(); - generic_arg_i += 1; + monomorphed_args[monomorphed_arg_i] = casted_arg.toIntern(); + monomorphed_arg_i += 1; + } else if (is_anytype or is_generic) { + monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty }); + monomorphed_arg_i += 1; } } @@ -7501,7 +7501,7 @@ fn instantiateGenericCall( if (mod.monomorphed_funcs.getAdapted( Module.MonomorphedFuncAdaptedKey{ .func = module_fn_index, - .args = generic_args[0..generic_arg_i], + .args = monomorphed_args[0..monomorphed_arg_i], }, Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, )) |callee_func| break :callee mod.intern_pool.indexToKey(callee_func).func.index; @@ -7550,11 +7550,11 @@ fn instantiateGenericCall( new_decl, new_decl_index, uncasted_args, - generic_arg_i, + monomorphed_arg_i, module_fn_index, new_module_func_index, namespace_index, - func_ty_info, + generic_func_ty, call_src, bound_arg_src, ) catch |err| switch (err) { @@ -7673,11 +7673,11 @@ fn resolveGenericInstantiationType( new_decl: *Decl, new_decl_index: Decl.Index, uncasted_args: []const Air.Inst.Ref, - generic_args_len: u32, + monomorphed_args_len: u32, module_fn_index: Module.Fn.Index, new_module_func: Module.Fn.Index, namespace: Namespace.Index, - func_ty_info: InternPool.Key.FuncType, + generic_func_ty: Type, call_src: LazySrcLoc, 
bound_arg_src: ?LazySrcLoc, ) !Module.Fn.Index { @@ -7737,18 +7737,19 @@ fn resolveGenericInstantiationType( var arg_i: usize = 0; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7802,25 +7803,26 @@ fn resolveGenericInstantiationType( const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; assert(new_func == new_module_func); - const generic_args_index = @intCast(u32, mod.monomorphed_func_keys.items.len); - const generic_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, generic_args_len); - var generic_arg_i: u32 = 0; - try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, generic_args_len + 1, .{ .mod = mod }); + const monomorphed_args_index = @intCast(u32, mod.monomorphed_func_keys.items.len); + const monomorphed_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, monomorphed_args_len); + var monomorphed_arg_i: u32 = 0; + try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, monomorphed_args_len + 1, .{ .mod = mod }); arg_i = 0; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7829,40 +7831,30 @@ fn resolveGenericInstantiationType( else => continue, } - // We populate the Type here regardless because it is needed by - // `GenericCallAdapter.eql` as well as function body analysis. - // Whether it is anytype is communicated by `isAnytypeParam`. 
+ const param_ty = generic_func_ty_info.param_types[arg_i]; + const is_generic = !is_anytype and param_ty == .generic_poison_type; + const arg = child_sema.inst_map.get(inst).?; const arg_ty = child_sema.typeOf(arg); - if (try sema.typeRequiresComptime(arg_ty)) { - is_comptime = true; - } + if (is_generic) if (mod.monomorphed_funcs.fetchPutAssumeCapacityContext(.{ + .func = module_fn_index, + .args_index = monomorphed_args_index, + .args_len = monomorphed_arg_i, + }, arg_ty.toIntern(), .{ .mod = mod })) |kv| assert(kv.value == arg_ty.toIntern()); + if (!is_comptime and try sema.typeRequiresComptime(arg_ty)) is_comptime = true; if (is_comptime) { const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?; - if (!is_anytype) { - if (mod.monomorphed_funcs.fetchPutAssumeCapacityContext(.{ - .func = module_fn_index, - .args_index = generic_args_index, - .args_len = generic_arg_i, - }, arg_ty.toIntern(), .{ .mod = mod })) |kv| assert(kv.value == arg_ty.toIntern()); - } - generic_args[generic_arg_i] = arg_val.toIntern(); - generic_arg_i += 1; - child_sema.comptime_args[arg_i] = .{ - .ty = arg_ty, - .val = (try arg_val.intern(arg_ty, mod)).toValue(), - }; + monomorphed_args[monomorphed_arg_i] = arg_val.toIntern(); + monomorphed_arg_i += 1; + child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = arg_val }; } else { - if (is_anytype) { - generic_args[generic_arg_i] = arg_ty.toIntern(); - generic_arg_i += 1; + if (is_anytype or is_generic) { + monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty.toIntern() }); + monomorphed_arg_i += 1; } - child_sema.comptime_args[arg_i] = .{ - .ty = arg_ty, - .val = Value.generic_poison, - }; + child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = Value.generic_poison }; } arg_i += 1; @@ -7895,8 +7887,8 @@ fn resolveGenericInstantiationType( mod.monomorphed_funcs.putAssumeCapacityNoClobberContext(.{ .func = module_fn_index, - .args_index = generic_args_index, - .args_len = generic_arg_i, + .args_index = monomorphed_args_index, + .args_len = monomorphed_arg_i, }, new_decl.val.toIntern(), .{ .mod = mod }); // Queue up a `codegen_func` work item for the new Fn. 
The `comptime_args` field From ff35a180dd1b55d601c862f6a1bc14e3d6dbf25c Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Jun 2023 01:17:50 +0100 Subject: [PATCH 203/205] Sema: intern values from resolved inferred allocs --- src/Sema.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Sema.zig b/src/Sema.zig index c2b7467ed6fa..aa04c40fd060 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5152,6 +5152,7 @@ fn storeToInferredAllocComptime( operand_val, iac.alignment.toByteUnits(0), ); + try sema.comptime_mutable_decls.append(iac.decl_index); return; } From 4b7c1e5c300c471618c9b12646247ef887a3a576 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 13 Jun 2023 02:18:01 +0100 Subject: [PATCH 204/205] tools: add LLDB pretty printer for InternPool.NullTerminatedString --- tools/lldb_pretty_printers.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 3d57adee707e..4f2d2b03cbee 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -478,6 +478,24 @@ def get_child_index(self, name): except: return -1 def get_child_at_index(self, index): return (self.tag, self.data, self.trailing)[index] if index in range(3) else None +def InternPool_NullTerminatedString_SummaryProvider(value, _=None): + try: + ip = InternPool_Find(value.thread) + if not ip: return + items = ip.GetChildMemberWithName('string_bytes').GetChildMemberWithName('items') + b = bytearray() + i = 0 + while True: + x = items.GetChildAtIndex(value.unsigned + i).GetValueAsUnsigned() + if x == 0: break + b.append(x) + i += 1 + s = b.decode(encoding='utf8', errors='backslashreplace') + s1 = s if s.isprintable() else ''.join((c if c.isprintable() else '\\x%02x' % ord(c) for c in s)) + return '"%s"' % s1 + except: + pass + def type_Type_pointer(payload): pointee_type = payload.GetChildMemberWithName('pointee_type') sentinel = payload.GetChildMemberWithName('sentinel').GetChildMemberWithName('child') @@ -690,6 +708,7 @@ def __lldb_init_module(debugger, _=None): add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True) add(debugger, category='zig.stage2', type='Module.LazySrcLoc', identifier='zig_TaggedUnion', synth=True) add(debugger, category='zig.stage2', type='InternPool.Index', synth=True) + add(debugger, category='zig.stage2', type='InternPool.NullTerminatedString', summary=True) add(debugger, category='zig.stage2', type='InternPool.Key', identifier='zig_TaggedUnion', synth=True) add(debugger, category='zig.stage2', type='InternPool.Key.Int.Storage', identifier='zig_TaggedUnion', synth=True) add(debugger, category='zig.stage2', type='InternPool.Key.ErrorUnion.Value', identifier='zig_TaggedUnion', synth=True) From 2ad073ec6d4e2be967f18c9907844404a7eed42e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jun 2023 19:09:30 -0700 Subject: [PATCH 205/205] link/Plan9: fix UAF of symbol names Long term, linker backends will need to manage their own string tables for things like this because my mandate is: no long-lived pointers allowed in any of the codepaths touched by incremental compilation, so that we can serialize and deserialize trivially. Short term, I solved this with a couple calls to Allocator.dupe, incurring some harmless leaks. 
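
To make the hazard concrete, here is a hedged, self-contained sketch
using std.ArrayList as a stand-in for the intern pool's string_bytes
(none of the names below come from this patch): slices borrowed from a
growable buffer are invalidated whenever the buffer reallocates, which
is why the symbol names in the diff below are duplicated before being
stored.

    const std = @import("std");

    test "borrowed slices can dangle after the buffer grows" {
        const gpa = std.testing.allocator;
        var string_bytes = std.ArrayList(u8).init(gpa);
        defer string_bytes.deinit();

        try string_bytes.appendSlice("decl_name");
        const borrowed = string_bytes.items; // points into the buffer

        // Take an owned copy, as the Allocator.dupe calls below do,
        // before anything can grow the buffer:
        const owned = try gpa.dupe(u8, borrowed);
        defer gpa.free(owned);

        // Growing the list may move its allocation, after which
        // `borrowed` must no longer be used; `owned` stays valid.
        try string_bytes.appendSlice("more bytes that may force a realloc");
        try std.testing.expectEqualStrings("decl_name", owned);
    }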
--- src/link/Plan9.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 2606dd7aac78..c08754b57a8b 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -441,7 +441,7 @@ fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { const sym: aout.Sym = .{ .value = undefined, // the value of stuff gets filled in in flushModule .type = decl_block.type, - .name = mod.intern_pool.stringToSlice(decl.name), + .name = try self.base.allocator.dupe(u8, mod.intern_pool.stringToSlice(decl.name)), }; if (decl_block.sym_index) |s| { @@ -741,7 +741,7 @@ fn addDeclExports( const sym = .{ .value = decl_block.offset.?, .type = decl_block.type.toGlobal(), - .name = exp_name, + .name = try self.base.allocator.dupe(u8, exp_name), }; if (metadata.getExport(self, exp_name)) |i| {